i965: Move intel_context::gen and gt fields to brw_context.

Most functions no longer use intel_context, so this patch additionally
removes the local "intel" variables to avoid compiler warnings.

Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Acked-by: Chris Forbes <chrisf@ijw.co.nz>
Acked-by: Paul Berry <stereotype441@gmail.com>
Acked-by: Anuj Phogat <anuj.phogat@gmail.com>
commit 53631be4eb
parent 2e26afb37b
Kenneth Graunke, 2013-07-06 00:36:46 -07:00
67 changed files with 483 additions and 622 deletions
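
The mechanical shape of the change, as a minimal sketch in C (struct and field layout simplified for illustration; these are not the actual Mesa definitions):

/* Before this commit, the generation fields lived on the embedded
 * intel_context, so i965 code reached through it, usually via a local:
 *
 *    struct intel_context *intel = &brw->intel;
 *    if (intel->gen >= 6) { ... }
 */
struct intel_context_before {
   int gen;   /* hardware generation: 4, 5, 6, 7, ... */
   int gt;    /* GT variant within a generation */
};

struct brw_context_before {
   struct intel_context_before intel;   /* embedded base context */
};

/* After the move, brw_context carries the fields itself, so callers
 * write brw->gen directly and the local "intel" variable can be
 * dropped (avoiding unused-variable warnings once nothing else
 * touches it):
 *
 *    if (brw->gen >= 6) { ... }
 */
struct brw_context_after {
   int gen;
   int gt;
};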


@@ -191,9 +191,7 @@ intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
 void
 brw_blorp_exec(struct brw_context *brw, const brw_blorp_params *params)
 {
-   struct intel_context *intel = &brw->intel;
-
-   switch (intel->gen) {
+   switch (brw->gen) {
    case 6:
       gen6_blorp_exec(brw, params);
       break;


@@ -359,7 +359,7 @@ brw_blorp_copytexsubimage(struct brw_context *brw,
    struct intel_mipmap_tree *dst_mt = intel_image->mt;
 
    /* BLORP is not supported before Gen6. */
-   if (intel->gen < 6)
+   if (brw->gen < 6)
       return false;
 
    if (!color_formats_match(src_mt->format, dst_mt->format)) {
@@ -435,10 +435,8 @@ brw_blorp_framebuffer(struct brw_context *brw,
                      GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                      GLbitfield mask, GLenum filter)
 {
-   struct intel_context *intel = &brw->intel;
-
    /* BLORP is not supported before Gen6. */
-   if (intel->gen < 6)
+   if (brw->gen < 6)
       return mask;
 
    static GLbitfield buffer_bits[] = {
@@ -844,7 +842,7 @@ brw_blorp_blit_program::compile(struct brw_context *brw,
     * irrelevant, because we are going to fetch all samples.
     */
    if (key->blend && !key->blit_scaled) {
-      if (brw->intel.gen == 6) {
+      if (brw->gen == 6) {
         /* Gen6 hardware an automatically blend using the SAMPLE message */
         single_to_blend();
         sample(texture_data[0]);
@@ -1802,7 +1800,7 @@ brw_blorp_blit_program::texel_fetch(struct brw_reg dst)
       SAMPLER_MESSAGE_ARG_V_INT
    };
 
-   switch (brw->intel.gen) {
+   switch (brw->gen) {
    case 6:
       texture_lookup(dst, GEN5_SAMPLER_MESSAGE_SAMPLE_LD, gen6_args,
                      s_is_zero ? 2 : 5);
@@ -2023,7 +2021,7 @@ compute_msaa_layout_for_pipeline(struct brw_context *brw, unsigned num_samples,
    }
 
    /* Prior to Gen7, all MSAA surfaces use IMS layout. */
-   if (brw->intel.gen == 6) {
+   if (brw->gen == 6) {
       assert(true_layout == INTEL_MSAA_LAYOUT_IMS);
    }
 
@@ -2078,7 +2076,7 @@ brw_blorp_blit_params::brw_blorp_blit_params(struct brw_context *brw,
       break;
    }
 
-   if (brw->intel.gen > 6) {
+   if (brw->gen > 6) {
      /* Gen7's rendering hardware only supports the IMS layout for depth and
       * stencil render targets.  Blorp always maps its destination surface as
       * a color render target (even if it's actually a depth or stencil


@@ -105,13 +105,12 @@ static bool
 brw_fast_clear_depth(struct gl_context *ctx)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
    struct gl_framebuffer *fb = ctx->DrawBuffer;
    struct intel_renderbuffer *depth_irb =
       intel_get_renderbuffer(fb, BUFFER_DEPTH);
    struct intel_mipmap_tree *mt = depth_irb->mt;
 
-   if (intel->gen < 6)
+   if (brw->gen < 6)
       return false;
 
    if (!intel_renderbuffer_has_hiz(depth_irb))
@@ -155,7 +154,7 @@ brw_fast_clear_depth(struct gl_context *ctx)
        * width of the map (LOD0) is not multiple of 16, fast clear
        * optimization must be disabled.
        */
-      if (intel->gen == 6 && (mt->level[depth_irb->mt_level].width % 16) != 0)
+      if (brw->gen == 6 && (mt->level[depth_irb->mt_level].width % 16) != 0)
         return false;
       /* FALLTHROUGH */
@@ -184,7 +183,7 @@ brw_fast_clear_depth(struct gl_context *ctx)
    intel_hiz_exec(brw, mt, depth_irb->mt_level, depth_irb->mt_layer,
                   GEN6_HIZ_OP_DEPTH_CLEAR);
 
-   if (intel->gen == 6) {
+   if (brw->gen == 6) {
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "DevSNB, DevSNB-B{W/A}]: Depth buffer clear pass must be followed
@@ -231,7 +230,7 @@ brw_clear(struct gl_context *ctx, GLbitfield mask)
    }
 
    /* BLORP is currently only supported on Gen6+. */
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       if (mask & BUFFER_BITS_COLOR) {
          if (brw_blorp_clear_color(brw, fb, partial_clear)) {
             debug_mask("blorp color", mask & BUFFER_BITS_COLOR);


@@ -51,7 +51,6 @@
 static void compile_clip_prog( struct brw_context *brw,
                                struct brw_clip_prog_key *key )
 {
-   struct intel_context *intel = &brw->intel;
    struct brw_clip_compile c;
    const GLuint *program;
    void *mem_ctx;
@@ -117,7 +116,7 @@ static void compile_clip_prog( struct brw_context *brw,
       printf("clip:\n");
       for (i = 0; i < program_size / sizeof(struct brw_instruction); i++)
         brw_disasm(stdout, &((struct brw_instruction *)program)[i],
-                   intel->gen);
+                   brw->gen);
       printf("\n");
    }
@@ -153,7 +152,7 @@ brw_upload_clip_prog(struct brw_context *brw)
    /* _NEW_TRANSFORM (also part of VUE map)*/
    key.nr_userclip = _mesa_bitcount_64(ctx->Transform.ClipPlanesEnabled);
 
-   if (intel->gen == 5)
+   if (brw->gen == 5)
       key.clip_mode = BRW_CLIPMODE_KERNEL_CLIP;
    else
       key.clip_mode = BRW_CLIPMODE_NORMAL;


@@ -45,7 +45,7 @@
 
 static void brw_clip_line_alloc_regs( struct brw_clip_compile *c )
 {
-   struct intel_context *intel = &c->func.brw->intel;
+   struct brw_context *brw = c->func.brw;
    GLuint i = 0,j;
 
    /* Register usage is static, precompute here:
@@ -85,7 +85,7 @@ static void brw_clip_line_alloc_regs( struct brw_clip_compile *c )
       i++;
    }
 
-   if (intel->gen == 5) {
+   if (brw->gen == 5) {
       c->reg.ff_sync = retype(brw_vec1_grf(i, 0), BRW_REGISTER_TYPE_UD);
       i++;
    }


@@ -105,7 +105,7 @@ brw_upload_clip_unit(struct brw_context *brw)
    /* Although up to 16 concurrent Clip threads are allowed on Ironlake,
     * only 2 threads can output VUEs at a time.
     */
-   if (intel->gen == 5)
+   if (brw->gen == 5)
       clip->thread4.max_threads = 16 - 1;
    else
       clip->thread4.max_threads = 2 - 1;


@@ -50,7 +50,7 @@ static void release_tmps( struct brw_clip_compile *c )
 void brw_clip_tri_alloc_regs( struct brw_clip_compile *c,
                               GLuint nr_verts )
 {
-   struct intel_context *intel = &c->func.brw->intel;
+   struct brw_context *brw = c->func.brw;
    GLuint i = 0,j;
 
    /* Register usage is static, precompute here:
@@ -122,7 +122,7 @@ void brw_clip_tri_alloc_regs( struct brw_clip_compile *c,
    c->reg.vertex_src_mask = retype(brw_vec1_grf(i, 0), BRW_REGISTER_TYPE_UD);
    i++;
 
-   if (intel->gen == 5) {
+   if (brw->gen == 5) {
       c->reg.ff_sync = retype(brw_vec1_grf(i, 0), BRW_REGISTER_TYPE_UD);
       i++;
    }


@@ -362,11 +362,10 @@ void brw_clip_init_clipmask( struct brw_clip_compile *c )
 
 void brw_clip_ff_sync(struct brw_clip_compile *c)
 {
-   struct intel_context *intel = &c->func.brw->intel;
+   struct brw_compile *p = &c->func;
+   struct brw_context *brw = p->brw;
 
-   if (intel->gen == 5) {
-      struct brw_compile *p = &c->func;
-
+   if (brw->gen == 5) {
       brw_set_conditionalmod(p, BRW_CONDITIONAL_Z);
       brw_AND(p, brw_null_reg(), c->reg.ff_sync, brw_imm_ud(0x1));
       brw_IF(p, BRW_EXECUTE_1);
@@ -387,9 +386,9 @@ void brw_clip_ff_sync(struct brw_clip_compile *c)
 
 void brw_clip_init_ff_sync(struct brw_clip_compile *c)
 {
-   struct intel_context *intel = &c->func.brw->intel;
+   struct brw_context *brw = c->func.brw;
 
-   if (intel->gen == 5) {
+   if (brw->gen == 5) {
       struct brw_compile *p = &c->func;
 
       brw_MOV(p, c->reg.ff_sync, brw_imm_ud(0));


@@ -62,11 +62,11 @@ static size_t
 brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                              GLenum internalFormat, int samples[16])
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    (void) target;
 
-   switch (intel->gen) {
+   switch (brw->gen) {
    case 7:
       samples[0] = 8;
       samples[1] = 4;
@@ -136,7 +136,7 @@ brw_initialize_context_constants(struct brw_context *brw)
    ctx->Const.Max3DTextureLevels = 9;
    ctx->Const.MaxCubeTextureLevels = 12;
 
-   if (intel->gen >= 7)
+   if (brw->gen >= 7)
       ctx->Const.MaxArrayTextureLayers = 2048;
    else
       ctx->Const.MaxArrayTextureLayers = 512;
@@ -167,12 +167,12 @@ brw_initialize_context_constants(struct brw_context *brw)
    ctx->Const.MaxTransformFeedbackSeparateComponents =
       BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
 
-   if (intel->gen == 6) {
+   if (brw->gen == 6) {
       ctx->Const.MaxSamples = 4;
       ctx->Const.MaxColorTextureSamples = 4;
       ctx->Const.MaxDepthTextureSamples = 4;
       ctx->Const.MaxIntegerSamples = 4;
-   } else if (intel->gen >= 7) {
+   } else if (brw->gen >= 7) {
       ctx->Const.MaxSamples = 8;
       ctx->Const.MaxColorTextureSamples = 8;
       ctx->Const.MaxDepthTextureSamples = 8;
@@ -191,7 +191,7 @@ brw_initialize_context_constants(struct brw_context *brw)
    ctx->Const.MaxPointSizeAA = 255.0;
    ctx->Const.PointSizeGranularity = 1.0;
 
-   if (intel->gen >= 6)
+   if (brw->gen >= 6)
       ctx->Const.MaxClipPlanes = 8;
 
    ctx->Const.VertexProgram.MaxNativeInstructions = 16 * 1024;
@@ -235,7 +235,7 @@ brw_initialize_context_constants(struct brw_context *brw)
     * that affect provoking vertex decision. Always use last vertex
     * convention for quad primitive which works as expected for now.
     */
-   if (intel->gen >= 6)
+   if (brw->gen >= 6)
       ctx->Const.QuadsFollowProvokingVertexConvention = false;
 
    ctx->Const.NativeIntegers = true;
@@ -250,7 +250,7 @@ brw_initialize_context_constants(struct brw_context *brw)
    /* We want the GLSL compiler to emit code that uses condition codes */
    for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
-      ctx->ShaderCompilerOptions[i].MaxIfDepth = intel->gen < 6 ? 16 : UINT_MAX;
+      ctx->ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
       ctx->ShaderCompilerOptions[i].EmitCondCodes = true;
       ctx->ShaderCompilerOptions[i].EmitNoNoise = true;
       ctx->ShaderCompilerOptions[i].EmitNoMainReturn = true;
@@ -291,7 +291,7 @@ brwCreateContext(int api,
    /* brwInitVtbl needs to know the chipset generation so that it can set the
     * right pointers.
    */
-   brw->intel.gen = screen->gen;
+   brw->gen = screen->gen;
 
    brwInitVtbl( brw );
@@ -313,7 +313,7 @@ brwCreateContext(int api,
    /* Reinitialize the context point state.  It depends on ctx->Const values. */
    _mesa_init_point(ctx);
 
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
@@ -341,11 +341,11 @@ brwCreateContext(int api,
    ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD;
    ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER;
 
-   if (brw->is_g4x || intel->gen >= 5) {
+   if (brw->is_g4x || brw->gen >= 5) {
       brw->CMD_VF_STATISTICS = GM45_3DSTATE_VF_STATISTICS;
       brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45;
       brw->has_surface_tile_offset = true;
-      if (intel->gen < 6)
+      if (brw->gen < 6)
          brw->has_compr4 = true;
       brw->has_aa_line_parameters = true;
       brw->has_pln = true;
@@ -355,37 +355,37 @@ brwCreateContext(int api,
    }
 
    /* WM maximum threads is number of EUs times number of threads per EU. */
-   assert(intel->gen <= 7);
+   assert(brw->gen <= 7);
 
    if (brw->is_haswell) {
-      if (intel->gt == 1) {
+      if (brw->gt == 1) {
         brw->max_wm_threads = 102;
         brw->max_vs_threads = 70;
         brw->urb.size = 128;
         brw->urb.max_vs_entries = 640;
         brw->urb.max_gs_entries = 256;
-      } else if (intel->gt == 2) {
+      } else if (brw->gt == 2) {
         brw->max_wm_threads = 204;
         brw->max_vs_threads = 280;
         brw->urb.size = 256;
         brw->urb.max_vs_entries = 1664;
         brw->urb.max_gs_entries = 640;
-      } else if (intel->gt == 3) {
+      } else if (brw->gt == 3) {
         brw->max_wm_threads = 408;
         brw->max_vs_threads = 280;
         brw->urb.size = 512;
         brw->urb.max_vs_entries = 1664;
         brw->urb.max_gs_entries = 640;
      }
-   } else if (intel->gen == 7) {
-      if (intel->gt == 1) {
+   } else if (brw->gen == 7) {
+      if (brw->gt == 1) {
         brw->max_wm_threads = 48;
         brw->max_vs_threads = 36;
         brw->max_gs_threads = 36;
         brw->urb.size = 128;
         brw->urb.max_vs_entries = 512;
         brw->urb.max_gs_entries = 192;
-      } else if (intel->gt == 2) {
+      } else if (brw->gt == 2) {
         brw->max_wm_threads = 172;
         brw->max_vs_threads = 128;
         brw->max_gs_threads = 128;
@@ -395,8 +395,8 @@ brwCreateContext(int api,
      } else {
         assert(!"Unknown gen7 device.");
      }
-   } else if (intel->gen == 6) {
-      if (intel->gt == 2) {
+   } else if (brw->gen == 6) {
+      if (brw->gt == 2) {
        brw->max_wm_threads = 80;
        brw->max_vs_threads = 60;
        brw->max_gs_threads = 60;
@@ -412,7 +412,7 @@ brwCreateContext(int api,
        brw->urb.max_gs_entries = 256;
      }
      brw->urb.gen6_gs_previously_active = false;
-   } else if (intel->gen == 5) {
+   } else if (brw->gen == 5) {
      brw->urb.size = 1024;
      brw->max_vs_threads = 72;
      brw->max_gs_threads = 32;
@@ -422,7 +422,7 @@ brwCreateContext(int api,
      brw->max_vs_threads = 32;
      brw->max_gs_threads = 2;
      brw->max_wm_threads = 10 * 5;
-   } else if (intel->gen < 6) {
+   } else if (brw->gen < 6) {
      brw->urb.size = 256;
      brw->max_vs_threads = 16;
      brw->max_gs_threads = 2;
@@ -430,7 +430,7 @@ brwCreateContext(int api,
      brw->has_negative_rhw_bug = true;
   }
 
-   if (intel->gen <= 7) {
+   if (brw->gen <= 7) {
      brw->needs_unlit_centroid_workaround = true;
   }


@@ -877,6 +877,9 @@ struct brw_context
 
    bool emit_state_always;
 
+   int gen;
+   int gt;
+
    bool is_g4x;
    bool is_baytrail;
    bool is_haswell;
@@ -1445,9 +1448,7 @@ static inline uint32_t
 brw_program_reloc(struct brw_context *brw, uint32_t state_offset,
                   uint32_t prog_offset)
 {
-   struct intel_context *intel = &brw->intel;
-
-   if (intel->gen >= 5) {
+   if (brw->gen >= 5) {
       /* Using state base address. */
       return prog_offset;
    }


@@ -160,7 +160,6 @@ static void brw_emit_prim(struct brw_context *brw,
                           const struct _mesa_prim *prim,
                           uint32_t hw_prim)
 {
-   struct intel_context *intel = &brw->intel;
    int verts_per_instance;
    int vertex_access_type;
    int start_vertex_location;
@@ -181,7 +180,7 @@ static void brw_emit_prim(struct brw_context *brw,
    }
 
    /* We only need to trim the primitive count on pre-Gen6. */
-   if (intel->gen < 6)
+   if (brw->gen < 6)
       verts_per_instance = trim(prim->mode, prim->count);
    else
       verts_per_instance = prim->count;
@@ -363,7 +362,6 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
                                GLuint min_index,
                                GLuint max_index )
 {
-   struct intel_context *intel = intel_context(ctx);
    struct brw_context *brw = brw_context(ctx);
    bool retval = true;
    GLuint i;
@@ -431,7 +429,7 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
          brw->basevertex = prim->basevertex;
          brw->state.dirty.brw |= BRW_NEW_VERTICES;
       }
-      if (intel->gen < 6)
+      if (brw->gen < 6)
         brw_set_prim(brw, &prim[i]);
      else
         gen6_set_prim(brw, &prim[i]);
@@ -447,7 +445,7 @@ retry:
          brw_upload_state(brw);
      }
 
-      if (intel->gen >= 7)
+      if (brw->gen >= 7)
         gen7_emit_prim(brw, &prim[i], brw->primitive);
      else
         brw_emit_prim(brw, &prim[i], brw->primitive);


@@ -226,7 +226,6 @@ static unsigned
 get_surface_type(struct brw_context *brw,
                  const struct gl_client_array *glarray)
 {
-   struct intel_context *intel = &brw->intel;
    int size = glarray->Size;
 
    if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
@@ -265,7 +264,7 @@ get_surface_type(struct brw_context *brw,
          return ubyte_types_norm[size];
      }
      case GL_FIXED:
-         if (intel->gen >= 8 || brw->is_haswell)
+         if (brw->gen >= 8 || brw->is_haswell)
            return fixed_point_types[size];
 
         /* This produces GL_FIXED inputs as values between INT32_MIN and
@@ -279,7 +278,7 @@ get_surface_type(struct brw_context *brw,
          */
      case GL_INT_2_10_10_10_REV:
         assert(size == 4);
-         if (intel->gen >= 8 || brw->is_haswell) {
+         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_SNORM
               : BRW_SURFACEFORMAT_R10G10B10A2_SNORM;
@@ -287,7 +286,7 @@ get_surface_type(struct brw_context *brw,
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      case GL_UNSIGNED_INT_2_10_10_10_REV:
         assert(size == 4);
-         if (intel->gen >= 8 || brw->is_haswell) {
+         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_UNORM
               : BRW_SURFACEFORMAT_R10G10B10A2_UNORM;
@@ -304,7 +303,7 @@ get_surface_type(struct brw_context *brw,
       */
      if (glarray->Type == GL_INT_2_10_10_10_REV) {
         assert(size == 4);
-         if (intel->gen >= 8 || brw->is_haswell) {
+         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_SSCALED
               : BRW_SURFACEFORMAT_R10G10B10A2_SSCALED;
@@ -312,7 +311,7 @@ get_surface_type(struct brw_context *brw,
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      } else if (glarray->Type == GL_UNSIGNED_INT_2_10_10_10_REV) {
         assert(size == 4);
-         if (intel->gen >= 8 || brw->is_haswell) {
+         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_USCALED
               : BRW_SURFACEFORMAT_R10G10B10A2_USCALED;
@@ -331,7 +330,7 @@ get_surface_type(struct brw_context *brw,
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      case GL_FIXED:
-         if (intel->gen >= 8 || brw->is_haswell)
+         if (brw->gen >= 8 || brw->is_haswell)
            return fixed_point_types[size];
 
         /* This produces GL_FIXED inputs as values between INT32_MIN and
@@ -401,7 +400,6 @@ copy_array_to_vbo_array(struct brw_context *brw,
 static void brw_prepare_vertices(struct brw_context *brw)
 {
    struct gl_context *ctx = &brw->intel.ctx;
-   struct intel_context *intel = intel_context(ctx);
    /* CACHE_NEW_VS_PROG */
    GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read;
    const unsigned char *ptr = NULL;
@@ -420,7 +418,7 @@ static void brw_prepare_vertices(struct brw_context *brw)
     * is passed sideband through the fixed function units. So, we need to
     * prepare the vertex buffer for it, but it's not present in inputs_read.
     */
-   if (intel->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL ||
-                           ctx->Polygon.BackMode != GL_FILL)) {
+   if (brw->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL ||
+                         ctx->Polygon.BackMode != GL_FILL)) {
      vs_inputs |= VERT_BIT_EDGEFLAG;
   }
@@ -592,8 +590,6 @@ static void brw_prepare_vertices(struct brw_context *brw)
 
 static void brw_emit_vertices(struct brw_context *brw)
 {
-   struct gl_context *ctx = &brw->intel.ctx;
-   struct intel_context *intel = intel_context(ctx);
    GLuint i, nr_elements;
 
    brw_prepare_vertices(brw);
@@ -612,7 +608,7 @@ static void brw_emit_vertices(struct brw_context *brw)
    if (nr_elements == 0) {
       BEGIN_BATCH(3);
       OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
-      if (intel->gen >= 6) {
+      if (brw->gen >= 6) {
         OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
@@ -635,7 +631,7 @@ static void brw_emit_vertices(struct brw_context *brw)
    */
 
    if (brw->vb.nr_buffers) {
-      if (intel->gen >= 6) {
+      if (brw->gen >= 6) {
         assert(brw->vb.nr_buffers <= 33);
      } else {
         assert(brw->vb.nr_buffers <= 17);
@@ -647,7 +643,7 @@ static void brw_emit_vertices(struct brw_context *brw)
          struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
          uint32_t dw0;
 
-         if (intel->gen >= 6) {
+         if (brw->gen >= 6) {
            dw0 = buffer->step_rate
                     ? GEN6_VB0_ACCESS_INSTANCEDATA
                     : GEN6_VB0_ACCESS_VERTEXDATA;
@@ -659,12 +655,12 @@ static void brw_emit_vertices(struct brw_context *brw)
            dw0 |= i << BRW_VB0_INDEX_SHIFT;
         }
 
-         if (intel->gen >= 7)
+         if (brw->gen >= 7)
            dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;
 
         OUT_BATCH(dw0 | (buffer->stride << BRW_VB0_PITCH_SHIFT));
         OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset);
-         if (intel->gen >= 5) {
+         if (brw->gen >= 5) {
            OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->bo->size - 1);
         } else
            OUT_BATCH(0);
@@ -676,7 +672,7 @@ static void brw_emit_vertices(struct brw_context *brw)
    /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS, presumably
     * for VertexID/InstanceID.
     */
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       assert(nr_elements <= 34);
    } else {
       assert(nr_elements <= 18);
@@ -705,7 +701,7 @@ static void brw_emit_vertices(struct brw_context *brw)
          * of in the VUE.  We have to upload it sideband as the last vertex
          * element according to the B-Spec.
          */
-         if (intel->gen >= 6) {
+         if (brw->gen >= 6) {
            gen6_edgeflag_input = input;
            continue;
         }
@@ -723,7 +719,7 @@ static void brw_emit_vertices(struct brw_context *brw)
         break;
      }
 
-      if (intel->gen >= 6) {
+      if (brw->gen >= 6) {
         OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
@@ -735,7 +731,7 @@ static void brw_emit_vertices(struct brw_context *brw)
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      }
 
-      if (intel->gen >= 5)
+      if (brw->gen >= 5)
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
@@ -748,7 +744,7 @@ static void brw_emit_vertices(struct brw_context *brw)
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }
 
-   if (intel->gen >= 6 && gen6_edgeflag_input) {
+   if (brw->gen >= 6 && gen6_edgeflag_input) {
      uint32_t format = get_surface_type(brw, gen6_edgeflag_input->glarray);
 
      OUT_BATCH((gen6_edgeflag_input->buffer << GEN6_VE0_INDEX_SHIFT) |
@@ -770,7 +766,7 @@ static void brw_emit_vertices(struct brw_context *brw)
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
 
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
      dw0 |= GEN6_VE0_VALID;
   } else {
      dw0 |= BRW_VE0_VALID;


@@ -111,7 +111,7 @@ brw_set_compression_control(struct brw_compile *p,
 {
    p->compressed = (compression_control == BRW_COMPRESSION_COMPRESSED);
 
-   if (p->brw->intel.gen >= 6) {
+   if (p->brw->gen >= 6) {
       /* Since we don't use the 32-wide support in gen6, we translate
        * the pre-gen6 compression control here.
        */
@@ -154,7 +154,7 @@ void brw_set_saturate( struct brw_compile *p, bool enable )
 
 void brw_set_acc_write_control(struct brw_compile *p, GLuint value)
 {
-   if (p->brw->intel.gen >= 6)
+   if (p->brw->gen >= 6)
       p->current->header.acc_wr_control = value;
 }
@@ -260,6 +260,6 @@ brw_dump_compile(struct brw_compile *p, FILE *out, int start, int end)
         offset += 16;
      }
 
-      brw_disasm(stdout, insn, p->brw->intel.gen);
+      brw_disasm(stdout, insn, p->brw->gen);
    }
 }


@@ -330,7 +330,6 @@ set_control_index(struct brw_context *brw,
                   struct brw_compact_instruction *dst,
                   struct brw_instruction *src)
 {
-   struct intel_context *intel = &brw->intel;
    uint32_t *src_u32 = (uint32_t *)src;
    uint32_t uncompacted = 0;
@@ -339,7 +338,7 @@ set_control_index(struct brw_context *brw,
    /* On gen7, the flag register number gets integrated into the control
     * index.
     */
-   if (intel->gen >= 7)
+   if (brw->gen >= 7)
       uncompacted |= ((src_u32[2] >> 25) & 0x3) << 17;
 
    for (int i = 0; i < 32; i++) {
@@ -450,7 +449,6 @@ brw_try_compact_instruction(struct brw_compile *p,
                             struct brw_instruction *src)
 {
    struct brw_context *brw = p->brw;
-   struct intel_context *intel = &brw->intel;
    struct brw_compact_instruction temp;
 
    if (src->header.opcode == BRW_OPCODE_IF ||
@@ -482,7 +480,7 @@ brw_try_compact_instruction(struct brw_compile *p,
       return false;
    temp.dw0.acc_wr_control = src->header.acc_wr_control;
    temp.dw0.conditionalmod = src->header.destreg__conditionalmod;
-   if (intel->gen <= 6)
+   if (brw->gen <= 6)
       temp.dw0.flag_subreg_nr = src->bits2.da1.flag_subreg_nr;
    temp.dw0.cmpt_ctrl = 1;
    if (!set_src0_index(&temp, src))
@@ -503,14 +501,13 @@ set_uncompacted_control(struct brw_context *brw,
                         struct brw_instruction *dst,
                         struct brw_compact_instruction *src)
 {
-   struct intel_context *intel = &brw->intel;
    uint32_t *dst_u32 = (uint32_t *)dst;
    uint32_t uncompacted = control_index_table[src->dw0.control_index];
 
    dst_u32[0] |= ((uncompacted >> 0) & 0xffff) << 8;
    dst_u32[0] |= ((uncompacted >> 16) & 0x1) << 31;
 
-   if (intel->gen >= 7)
+   if (brw->gen >= 7)
       dst_u32[2] |= ((uncompacted >> 17) & 0x3) << 25;
 }
@@ -561,7 +558,6 @@ brw_uncompact_instruction(struct brw_context *brw,
                           struct brw_instruction *dst,
                           struct brw_compact_instruction *src)
 {
-   struct intel_context *intel = &brw->intel;
    memset(dst, 0, sizeof(*dst));
 
    dst->header.opcode = src->dw0.opcode;
@@ -572,7 +568,7 @@ brw_uncompact_instruction(struct brw_context *brw,
    set_uncompacted_subreg(dst, src);
    dst->header.acc_wr_control = src->dw0.acc_wr_control;
    dst->header.destreg__conditionalmod = src->dw0.conditionalmod;
-   if (intel->gen <= 6)
+   if (brw->gen <= 6)
       dst->bits2.da1.flag_subreg_nr = src->dw0.flag_subreg_nr;
    set_uncompacted_src0(dst, src);
    set_uncompacted_src1(dst, src);
@@ -585,15 +581,14 @@ void brw_debug_compact_uncompact(struct brw_context *brw,
                                  struct brw_instruction *orig,
                                  struct brw_instruction *uncompacted)
 {
-   struct intel_context *intel = &brw->intel;
    fprintf(stderr, "Instruction compact/uncompact changed (gen%d):\n",
-           intel->gen);
+           brw->gen);
    fprintf(stderr, "  before: ");
-   brw_disasm(stderr, orig, intel->gen);
+   brw_disasm(stderr, orig, brw->gen);
    fprintf(stderr, "  after:  ");
-   brw_disasm(stderr, uncompacted, intel->gen);
+   brw_disasm(stderr, uncompacted, brw->gen);
 
    uint32_t *before_bits = (uint32_t *)orig;
    uint32_t *after_bits = (uint32_t *)uncompacted;
@@ -638,7 +633,6 @@ update_uip_jip(struct brw_instruction *insn, int this_old_ip,
 void
 brw_init_compaction_tables(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
    assert(gen6_control_index_table[ARRAY_SIZE(gen6_control_index_table) - 1] != 0);
    assert(gen6_datatype_table[ARRAY_SIZE(gen6_datatype_table) - 1] != 0);
    assert(gen6_subreg_table[ARRAY_SIZE(gen6_subreg_table) - 1] != 0);
@@ -648,7 +642,7 @@ brw_init_compaction_tables(struct brw_context *brw)
    assert(gen7_subreg_table[ARRAY_SIZE(gen6_subreg_table) - 1] != 0);
    assert(gen7_src_index_table[ARRAY_SIZE(gen6_src_index_table) - 1] != 0);
 
-   switch (intel->gen) {
+   switch (brw->gen) {
    case 7:
       control_index_table = gen7_control_index_table;
       datatype_table = gen7_datatype_table;
@@ -670,7 +664,6 @@ void
 brw_compact_instructions(struct brw_compile *p)
 {
    struct brw_context *brw = p->brw;
-   struct intel_context *intel = &brw->intel;
    void *store = p->store;
    /* For an instruction at byte offset 8*i before compaction, this is the number
     * of compacted instructions that preceded it.
@@ -681,7 +674,7 @@ brw_compact_instructions(struct brw_compile *p)
     */
    int old_ip[p->next_insn_offset / 8];
 
-   if (intel->gen < 6)
+   if (brw->gen < 6)
      return;
 
    int src_offset;
@@ -759,7 +752,7 @@ brw_compact_instructions(struct brw_compile *p)
       case BRW_OPCODE_ELSE:
       case BRW_OPCODE_ENDIF:
       case BRW_OPCODE_WHILE:
-         if (intel->gen == 6) {
+         if (brw->gen == 6) {
            target_old_ip = this_old_ip + insn->bits1.branch_gen6.jump_count;
            target_compacted_count = compacted_counts[target_old_ip];
            insn->bits1.branch_gen6.jump_count -= (target_compacted_count -


@ -63,8 +63,8 @@ gen6_resolve_implied_move(struct brw_compile *p,
struct brw_reg *src, struct brw_reg *src,
GLuint msg_reg_nr) GLuint msg_reg_nr)
{ {
struct intel_context *intel = &p->brw->intel; struct brw_context *brw = p->brw;
if (intel->gen < 6) if (brw->gen < 6)
return; return;
if (src->file == BRW_MESSAGE_REGISTER_FILE) if (src->file == BRW_MESSAGE_REGISTER_FILE)
@ -92,8 +92,8 @@ gen7_convert_mrf_to_grf(struct brw_compile *p, struct brw_reg *reg)
* Since we're pretending to have 16 MRFs anyway, we may as well use the * Since we're pretending to have 16 MRFs anyway, we may as well use the
* registers required for messages with EOT. * registers required for messages with EOT.
*/ */
struct intel_context *intel = &p->brw->intel; struct brw_context *brw = p->brw;
if (intel->gen == 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) { if (brw->gen == 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
reg->file = BRW_GENERAL_REGISTER_FILE; reg->file = BRW_GENERAL_REGISTER_FILE;
reg->nr += GEN7_MRF_HACK_START; reg->nr += GEN7_MRF_HACK_START;
} }
@ -240,14 +240,13 @@ brw_set_src0(struct brw_compile *p, struct brw_instruction *insn,
struct brw_reg reg) struct brw_reg reg)
{ {
struct brw_context *brw = p->brw; struct brw_context *brw = p->brw;
struct intel_context *intel = &brw->intel;
if (reg.type != BRW_ARCHITECTURE_REGISTER_FILE) if (reg.type != BRW_ARCHITECTURE_REGISTER_FILE)
assert(reg.nr < 128); assert(reg.nr < 128);
gen7_convert_mrf_to_grf(p, &reg); gen7_convert_mrf_to_grf(p, &reg);
if (intel->gen >= 6 && (insn->header.opcode == BRW_OPCODE_SEND || if (brw->gen >= 6 && (insn->header.opcode == BRW_OPCODE_SEND ||
insn->header.opcode == BRW_OPCODE_SENDC)) { insn->header.opcode == BRW_OPCODE_SENDC)) {
/* Any source modifiers or regions will be ignored, since this just /* Any source modifiers or regions will be ignored, since this just
* identifies the MRF/GRF to start reading the message contents from. * identifies the MRF/GRF to start reading the message contents from.
@ -416,17 +415,17 @@ brw_set_message_descriptor(struct brw_compile *p,
bool header_present, bool header_present,
bool end_of_thread) bool end_of_thread)
{ {
struct intel_context *intel = &p->brw->intel; struct brw_context *brw = p->brw;
brw_set_src1(p, inst, brw_imm_d(0)); brw_set_src1(p, inst, brw_imm_d(0));
if (intel->gen >= 5) { if (brw->gen >= 5) {
inst->bits3.generic_gen5.header_present = header_present; inst->bits3.generic_gen5.header_present = header_present;
inst->bits3.generic_gen5.response_length = response_length; inst->bits3.generic_gen5.response_length = response_length;
inst->bits3.generic_gen5.msg_length = msg_length; inst->bits3.generic_gen5.msg_length = msg_length;
inst->bits3.generic_gen5.end_of_thread = end_of_thread; inst->bits3.generic_gen5.end_of_thread = end_of_thread;
if (intel->gen >= 6) { if (brw->gen >= 6) {
/* On Gen6+ Message target/SFID goes in bits 27:24 of the header */ /* On Gen6+ Message target/SFID goes in bits 27:24 of the header */
inst->header.destreg__conditionalmod = sfid; inst->header.destreg__conditionalmod = sfid;
} else { } else {
@ -450,7 +449,6 @@ static void brw_set_math_message( struct brw_compile *p,
GLuint dataType ) GLuint dataType )
{ {
struct brw_context *brw = p->brw; struct brw_context *brw = p->brw;
struct intel_context *intel = &brw->intel;
unsigned msg_length; unsigned msg_length;
unsigned response_length; unsigned response_length;
@ -481,7 +479,7 @@ static void brw_set_math_message( struct brw_compile *p,
brw_set_message_descriptor(p, insn, BRW_SFID_MATH, brw_set_message_descriptor(p, insn, BRW_SFID_MATH,
msg_length, response_length, false, false); msg_length, response_length, false, false);
if (intel->gen == 5) { if (brw->gen == 5) {
insn->bits3.math_gen5.function = function; insn->bits3.math_gen5.function = function;
insn->bits3.math_gen5.int_type = integer_type; insn->bits3.math_gen5.int_type = integer_type;
insn->bits3.math_gen5.precision = low_precision; insn->bits3.math_gen5.precision = low_precision;
@ -527,11 +525,10 @@ static void brw_set_urb_message( struct brw_compile *p,
GLuint swizzle_control ) GLuint swizzle_control )
{ {
struct brw_context *brw = p->brw; struct brw_context *brw = p->brw;
struct intel_context *intel = &brw->intel;
brw_set_message_descriptor(p, insn, BRW_SFID_URB, brw_set_message_descriptor(p, insn, BRW_SFID_URB,
msg_length, response_length, true, end_of_thread); msg_length, response_length, true, end_of_thread);
if (intel->gen == 7) { if (brw->gen == 7) {
insn->bits3.urb_gen7.opcode = 0; /* URB_WRITE_HWORD */ insn->bits3.urb_gen7.opcode = 0; /* URB_WRITE_HWORD */
insn->bits3.urb_gen7.offset = offset; insn->bits3.urb_gen7.offset = offset;
assert(swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE); assert(swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
@ -539,7 +536,7 @@ static void brw_set_urb_message( struct brw_compile *p,
/* per_slot_offset = 0 makes it ignore offsets in message header */ /* per_slot_offset = 0 makes it ignore offsets in message header */
insn->bits3.urb_gen7.per_slot_offset = 0; insn->bits3.urb_gen7.per_slot_offset = 0;
insn->bits3.urb_gen7.complete = complete; insn->bits3.urb_gen7.complete = complete;
} else if (intel->gen >= 5) { } else if (brw->gen >= 5) {
insn->bits3.urb_gen5.opcode = 0; /* URB_WRITE */ insn->bits3.urb_gen5.opcode = 0; /* URB_WRITE */
insn->bits3.urb_gen5.offset = offset; insn->bits3.urb_gen5.offset = offset;
insn->bits3.urb_gen5.swizzle_control = swizzle_control; insn->bits3.urb_gen5.swizzle_control = swizzle_control;
@ -570,16 +567,15 @@ brw_set_dp_write_message(struct brw_compile *p,
GLuint send_commit_msg) GLuint send_commit_msg)
{ {
struct brw_context *brw = p->brw; struct brw_context *brw = p->brw;
struct intel_context *intel = &brw->intel;
unsigned sfid; unsigned sfid;
if (intel->gen >= 7) { if (brw->gen >= 7) {
/* Use the Render Cache for RT writes; otherwise use the Data Cache */ /* Use the Render Cache for RT writes; otherwise use the Data Cache */
if (msg_type == GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE) if (msg_type == GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE)
sfid = GEN6_SFID_DATAPORT_RENDER_CACHE; sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
else else
sfid = GEN7_SFID_DATAPORT_DATA_CACHE; sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
} else if (intel->gen == 6) { } else if (brw->gen == 6) {
/* Use the render cache for all write messages. */ /* Use the render cache for all write messages. */
sfid = GEN6_SFID_DATAPORT_RENDER_CACHE; sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
} else { } else {
@ -589,18 +585,18 @@ brw_set_dp_write_message(struct brw_compile *p,
brw_set_message_descriptor(p, insn, sfid, msg_length, response_length, brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
header_present, end_of_thread); header_present, end_of_thread);
if (intel->gen >= 7) { if (brw->gen >= 7) {
insn->bits3.gen7_dp.binding_table_index = binding_table_index; insn->bits3.gen7_dp.binding_table_index = binding_table_index;
insn->bits3.gen7_dp.msg_control = msg_control; insn->bits3.gen7_dp.msg_control = msg_control;
insn->bits3.gen7_dp.last_render_target = last_render_target; insn->bits3.gen7_dp.last_render_target = last_render_target;
insn->bits3.gen7_dp.msg_type = msg_type; insn->bits3.gen7_dp.msg_type = msg_type;
} else if (intel->gen == 6) { } else if (brw->gen == 6) {
insn->bits3.gen6_dp.binding_table_index = binding_table_index; insn->bits3.gen6_dp.binding_table_index = binding_table_index;
insn->bits3.gen6_dp.msg_control = msg_control; insn->bits3.gen6_dp.msg_control = msg_control;
insn->bits3.gen6_dp.last_render_target = last_render_target; insn->bits3.gen6_dp.last_render_target = last_render_target;
insn->bits3.gen6_dp.msg_type = msg_type; insn->bits3.gen6_dp.msg_type = msg_type;
insn->bits3.gen6_dp.send_commit_msg = send_commit_msg; insn->bits3.gen6_dp.send_commit_msg = send_commit_msg;
} else if (intel->gen == 5) { } else if (brw->gen == 5) {
insn->bits3.dp_write_gen5.binding_table_index = binding_table_index; insn->bits3.dp_write_gen5.binding_table_index = binding_table_index;
insn->bits3.dp_write_gen5.msg_control = msg_control; insn->bits3.dp_write_gen5.msg_control = msg_control;
insn->bits3.dp_write_gen5.last_render_target = last_render_target; insn->bits3.dp_write_gen5.last_render_target = last_render_target;
@ -627,12 +623,11 @@ brw_set_dp_read_message(struct brw_compile *p,
GLuint response_length) GLuint response_length)
{ {
struct brw_context *brw = p->brw; struct brw_context *brw = p->brw;
struct intel_context *intel = &brw->intel;
unsigned sfid; unsigned sfid;
if (intel->gen >= 7) { if (brw->gen >= 7) {
sfid = GEN7_SFID_DATAPORT_DATA_CACHE; sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
} else if (intel->gen == 6) { } else if (brw->gen == 6) {
if (target_cache == BRW_DATAPORT_READ_TARGET_RENDER_CACHE) if (target_cache == BRW_DATAPORT_READ_TARGET_RENDER_CACHE)
sfid = GEN6_SFID_DATAPORT_RENDER_CACHE; sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
else else
@ -644,18 +639,18 @@ brw_set_dp_read_message(struct brw_compile *p,
brw_set_message_descriptor(p, insn, sfid, msg_length, response_length, brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
header_present, false); header_present, false);
if (intel->gen >= 7) { if (brw->gen >= 7) {
insn->bits3.gen7_dp.binding_table_index = binding_table_index; insn->bits3.gen7_dp.binding_table_index = binding_table_index;
insn->bits3.gen7_dp.msg_control = msg_control; insn->bits3.gen7_dp.msg_control = msg_control;
insn->bits3.gen7_dp.last_render_target = 0; insn->bits3.gen7_dp.last_render_target = 0;
insn->bits3.gen7_dp.msg_type = msg_type; insn->bits3.gen7_dp.msg_type = msg_type;
} else if (intel->gen == 6) { } else if (brw->gen == 6) {
insn->bits3.gen6_dp.binding_table_index = binding_table_index; insn->bits3.gen6_dp.binding_table_index = binding_table_index;
insn->bits3.gen6_dp.msg_control = msg_control; insn->bits3.gen6_dp.msg_control = msg_control;
insn->bits3.gen6_dp.last_render_target = 0; insn->bits3.gen6_dp.last_render_target = 0;
insn->bits3.gen6_dp.msg_type = msg_type; insn->bits3.gen6_dp.msg_type = msg_type;
insn->bits3.gen6_dp.send_commit_msg = 0; insn->bits3.gen6_dp.send_commit_msg = 0;
} else if (intel->gen == 5) { } else if (brw->gen == 5) {
insn->bits3.dp_read_gen5.binding_table_index = binding_table_index; insn->bits3.dp_read_gen5.binding_table_index = binding_table_index;
insn->bits3.dp_read_gen5.msg_control = msg_control; insn->bits3.dp_read_gen5.msg_control = msg_control;
insn->bits3.dp_read_gen5.msg_type = msg_type; insn->bits3.dp_read_gen5.msg_type = msg_type;
@ -686,17 +681,16 @@ brw_set_sampler_message(struct brw_compile *p,
GLuint return_format) GLuint return_format)
{ {
struct brw_context *brw = p->brw; struct brw_context *brw = p->brw;
struct intel_context *intel = &brw->intel;
brw_set_message_descriptor(p, insn, BRW_SFID_SAMPLER, msg_length, brw_set_message_descriptor(p, insn, BRW_SFID_SAMPLER, msg_length,
response_length, header_present, false); response_length, header_present, false);
if (intel->gen >= 7) { if (brw->gen >= 7) {
insn->bits3.sampler_gen7.binding_table_index = binding_table_index; insn->bits3.sampler_gen7.binding_table_index = binding_table_index;
insn->bits3.sampler_gen7.sampler = sampler; insn->bits3.sampler_gen7.sampler = sampler;
insn->bits3.sampler_gen7.msg_type = msg_type; insn->bits3.sampler_gen7.msg_type = msg_type;
insn->bits3.sampler_gen7.simd_mode = simd_mode; insn->bits3.sampler_gen7.simd_mode = simd_mode;
} else if (intel->gen >= 5) { } else if (brw->gen >= 5) {
insn->bits3.sampler_gen5.binding_table_index = binding_table_index; insn->bits3.sampler_gen5.binding_table_index = binding_table_index;
insn->bits3.sampler_gen5.sampler = sampler; insn->bits3.sampler_gen5.sampler = sampler;
insn->bits3.sampler_gen5.msg_type = msg_type; insn->bits3.sampler_gen5.msg_type = msg_type;
@ -788,7 +782,7 @@ static struct brw_instruction *brw_alu3(struct brw_compile *p,
struct brw_reg src1, struct brw_reg src1,
struct brw_reg src2) struct brw_reg src2)
{ {
struct intel_context *intel = &p->brw->intel; struct brw_context *brw = p->brw;
struct brw_instruction *insn = next_insn(p, opcode); struct brw_instruction *insn = next_insn(p, opcode);
gen7_convert_mrf_to_grf(p, &dest); gen7_convert_mrf_to_grf(p, &dest);
@ -839,7 +833,7 @@ static struct brw_instruction *brw_alu3(struct brw_compile *p,
insn->bits1.da3src.src2_abs = src2.abs; insn->bits1.da3src.src2_abs = src2.abs;
insn->bits1.da3src.src2_negate = src2.negate; insn->bits1.da3src.src2_negate = src2.negate;
if (intel->gen >= 7) { if (brw->gen >= 7) {
/* Set both the source and destination types based on dest.type, /* Set both the source and destination types based on dest.type,
* ignoring the source register types. The MAD and LRP emitters ensure * ignoring the source register types. The MAD and LRP emitters ensure
* that all four types are float. The BFE and BFI2 emitters, however, * that all four types are float. The BFE and BFI2 emitters, however,
@ -927,7 +921,7 @@ void brw_##OP(struct brw_compile *p, \
brw_set_dest(p, rnd, dest); \ brw_set_dest(p, rnd, dest); \
brw_set_src0(p, rnd, src); \ brw_set_src0(p, rnd, src); \
\ \
if (p->brw->intel.gen < 6) { \ if (p->brw->gen < 6) { \
/* turn on round-increments */ \ /* turn on round-increments */ \
rnd->header.destreg__conditionalmod = BRW_CONDITIONAL_R; \ rnd->header.destreg__conditionalmod = BRW_CONDITIONAL_R; \
    add = brw_ADD(p, dest, dest, brw_imm_f(1.0f)); \
@@ -1145,18 +1139,18 @@ get_inner_do_insn(struct brw_compile *p)
 struct brw_instruction *
 brw_IF(struct brw_compile *p, GLuint execute_size)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *insn;
    insn = next_insn(p, BRW_OPCODE_IF);
    /* Override the defaults for this instruction:
     */
-   if (intel->gen < 6) {
+   if (brw->gen < 6) {
       brw_set_dest(p, insn, brw_ip_reg());
       brw_set_src0(p, insn, brw_ip_reg());
       brw_set_src1(p, insn, brw_imm_d(0x0));
-   } else if (intel->gen == 6) {
+   } else if (brw->gen == 6) {
       brw_set_dest(p, insn, brw_imm_w(0));
       insn->bits1.branch_gen6.jump_count = 0;
       brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
@@ -1264,7 +1258,7 @@ patch_IF_ELSE(struct brw_compile *p,
               struct brw_instruction *else_inst,
               struct brw_instruction *endif_inst)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    /* We shouldn't be patching IF and ELSE instructions in single program flow
     * mode when gen < 6, because in single program flow mode on those
@@ -1278,7 +1272,7 @@ patch_IF_ELSE(struct brw_compile *p,
     * instructions to conditional ADDs.  So we do patch IF and ELSE
     * instructions in single program flow mode on those platforms.
     */
-   if (intel->gen < 6)
+   if (brw->gen < 6)
       assert(!p->single_program_flow);
    assert(if_inst != NULL && if_inst->header.opcode == BRW_OPCODE_IF);
@@ -1289,7 +1283,7 @@ patch_IF_ELSE(struct brw_compile *p,
    /* Jump count is for 64bit data chunk each, so one 128bit instruction
     * requires 2 chunks.
     */
-   if (intel->gen >= 5)
+   if (brw->gen >= 5)
       br = 2;
    assert(endif_inst->header.opcode == BRW_OPCODE_ENDIF);
@@ -1297,7 +1291,7 @@ patch_IF_ELSE(struct brw_compile *p,
    if (else_inst == NULL) {
       /* Patch IF -> ENDIF */
-      if (intel->gen < 6) {
+      if (brw->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
@@ -1305,7 +1299,7 @@ patch_IF_ELSE(struct brw_compile *p,
         if_inst->bits3.if_else.jump_count = br * (endif_inst - if_inst + 1);
         if_inst->bits3.if_else.pop_count = 0;
         if_inst->bits3.if_else.pad0 = 0;
-      } else if (intel->gen == 6) {
+      } else if (brw->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         if_inst->bits1.branch_gen6.jump_count = br * (endif_inst - if_inst);
      } else {
@@ -1316,23 +1310,23 @@ patch_IF_ELSE(struct brw_compile *p,
      else_inst->header.execution_size = if_inst->header.execution_size;
      /* Patch IF -> ELSE */
-      if (intel->gen < 6) {
+      if (brw->gen < 6) {
         if_inst->bits3.if_else.jump_count = br * (else_inst - if_inst);
         if_inst->bits3.if_else.pop_count = 0;
         if_inst->bits3.if_else.pad0 = 0;
-      } else if (intel->gen == 6) {
+      } else if (brw->gen == 6) {
         if_inst->bits1.branch_gen6.jump_count = br * (else_inst - if_inst + 1);
      }
      /* Patch ELSE -> ENDIF */
-      if (intel->gen < 6) {
+      if (brw->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         else_inst->bits3.if_else.jump_count = br*(endif_inst - else_inst + 1);
         else_inst->bits3.if_else.pop_count = 1;
         else_inst->bits3.if_else.pad0 = 0;
-      } else if (intel->gen == 6) {
+      } else if (brw->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         else_inst->bits1.branch_gen6.jump_count = br*(endif_inst - else_inst);
      } else {
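
The jump counts patched above are measured in 64-bit chunks, which is why the `br` multiplier doubles once every instruction is a full 128-bit word. A minimal sketch of that scaling, using a hypothetical helper name rather than anything defined in this patch:

/* Hypothetical helper: branch distances count 64-bit chunks, and from
 * Gen5 on each instruction is one 128-bit (two-chunk) word. */
static int
brw_jump_scale(int gen)
{
   return gen >= 5 ? 2 : 1;
}
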
@@ -1348,16 +1342,16 @@ patch_IF_ELSE(struct brw_compile *p,
 void
 brw_ELSE(struct brw_compile *p)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *insn;
    insn = next_insn(p, BRW_OPCODE_ELSE);
-   if (intel->gen < 6) {
+   if (brw->gen < 6) {
       brw_set_dest(p, insn, brw_ip_reg());
       brw_set_src0(p, insn, brw_ip_reg());
       brw_set_src1(p, insn, brw_imm_d(0x0));
-   } else if (intel->gen == 6) {
+   } else if (brw->gen == 6) {
       brw_set_dest(p, insn, brw_imm_w(0));
       insn->bits1.branch_gen6.jump_count = 0;
       brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
@@ -1381,7 +1375,7 @@ brw_ELSE(struct brw_compile *p)
 void
 brw_ENDIF(struct brw_compile *p)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *insn = NULL;
    struct brw_instruction *else_inst = NULL;
    struct brw_instruction *if_inst = NULL;
@@ -1400,7 +1394,7 @@ brw_ENDIF(struct brw_compile *p)
    * instructions to conditional ADDs.  So we only do this trick on Gen4 and
    * Gen5.
    */
-   if (intel->gen < 6 && p->single_program_flow)
+   if (brw->gen < 6 && p->single_program_flow)
       emit_endif = false;
    /*
@@ -1426,11 +1420,11 @@ brw_ENDIF(struct brw_compile *p)
       return;
    }
-   if (intel->gen < 6) {
+   if (brw->gen < 6) {
       brw_set_dest(p, insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD));
       brw_set_src0(p, insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD));
       brw_set_src1(p, insn, brw_imm_d(0x0));
-   } else if (intel->gen == 6) {
+   } else if (brw->gen == 6) {
       brw_set_dest(p, insn, brw_imm_w(0));
       brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
       brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
@@ -1445,11 +1439,11 @@ brw_ENDIF(struct brw_compile *p)
       insn->header.thread_control = BRW_THREAD_SWITCH;
    /* Also pop item off the stack in the endif instruction: */
-   if (intel->gen < 6) {
+   if (brw->gen < 6) {
       insn->bits3.if_else.jump_count = 0;
       insn->bits3.if_else.pop_count = 1;
       insn->bits3.if_else.pad0 = 0;
-   } else if (intel->gen == 6) {
+   } else if (brw->gen == 6) {
       insn->bits1.branch_gen6.jump_count = 2;
    } else {
       insn->bits3.break_cont.jip = 2;
@@ -1459,11 +1453,11 @@ brw_ENDIF(struct brw_compile *p)
 struct brw_instruction *brw_BREAK(struct brw_compile *p)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *insn;
    insn = next_insn(p, BRW_OPCODE_BREAK);
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
       brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
       brw_set_src1(p, insn, brw_imm_d(0x0));
@@ -1547,9 +1541,9 @@ struct brw_instruction *gen6_HALT(struct brw_compile *p)
  */
 struct brw_instruction *brw_DO(struct brw_compile *p, GLuint execute_size)
 {
-   struct intel_context *intel = &p->brw->intel;
-   if (intel->gen >= 6 || p->single_program_flow) {
+   struct brw_context *brw = p->brw;
+   if (brw->gen >= 6 || p->single_program_flow) {
       push_loop_stack(p, &p->store[p->nr_insn]);
       return &p->store[p->nr_insn];
    } else {
@@ -1583,10 +1577,10 @@ struct brw_instruction *brw_DO(struct brw_compile *p, GLuint execute_size)
 static void
 brw_patch_break_cont(struct brw_compile *p, struct brw_instruction *while_inst)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *do_inst = get_inner_do_insn(p);
    struct brw_instruction *inst;
-   int br = (intel->gen == 5) ? 2 : 1;
+   int br = (brw->gen == 5) ? 2 : 1;
    for (inst = while_inst - 1; inst != do_inst; inst--) {
       /* If the jump count is != 0, that means that this instruction has already
@@ -1605,14 +1599,14 @@ brw_patch_break_cont(struct brw_compile *p, struct brw_instruction *while_inst)
 struct brw_instruction *brw_WHILE(struct brw_compile *p)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *insn, *do_insn;
    GLuint br = 1;
-   if (intel->gen >= 5)
+   if (brw->gen >= 5)
       br = 2;
-   if (intel->gen >= 7) {
+   if (brw->gen >= 7) {
       insn = next_insn(p, BRW_OPCODE_WHILE);
       do_insn = get_inner_do_insn(p);
@@ -1622,7 +1616,7 @@ struct brw_instruction *brw_WHILE(struct brw_compile *p)
       insn->bits3.break_cont.jip = br * (do_insn - insn);
       insn->header.execution_size = BRW_EXECUTE_8;
-   } else if (intel->gen == 6) {
+   } else if (brw->gen == 6) {
       insn = next_insn(p, BRW_OPCODE_WHILE);
       do_insn = get_inner_do_insn(p);
@@ -1672,11 +1666,11 @@ struct brw_instruction *brw_WHILE(struct brw_compile *p)
  */
 void brw_land_fwd_jump(struct brw_compile *p, int jmp_insn_idx)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *jmp_insn = &p->store[jmp_insn_idx];
    GLuint jmpi = 1;
-   if (intel->gen >= 5)
+   if (brw->gen >= 5)
       jmpi = 2;
    assert(jmp_insn->header.opcode == BRW_OPCODE_JMPI);
@@ -1697,7 +1691,7 @@ void brw_CMP(struct brw_compile *p,
              struct brw_reg src0,
              struct brw_reg src1)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *insn = next_insn(p, BRW_OPCODE_CMP);
    insn->header.destreg__conditionalmod = conditional;
@@ -1725,7 +1719,7 @@ void brw_CMP(struct brw_compile *p,
    * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
    * mentioned on their work-arounds pages.
    */
-   if (intel->gen == 7) {
+   if (brw->gen == 7) {
       if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
           dest.nr == BRW_ARF_NULL) {
          insn->header.thread_control = BRW_THREAD_SWITCH;
@@ -1763,21 +1757,21 @@ void brw_math( struct brw_compile *p,
               GLuint data_type,
               GLuint precision )
 {
-   struct intel_context *intel = &p->brw->intel;
-   if (intel->gen >= 6) {
+   struct brw_context *brw = p->brw;
+   if (brw->gen >= 6) {
       struct brw_instruction *insn = next_insn(p, BRW_OPCODE_MATH);
       assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
-             (intel->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
+             (brw->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
       assert(src.file == BRW_GENERAL_REGISTER_FILE);
       assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
-      if (intel->gen == 6)
+      if (brw->gen == 6)
         assert(src.hstride == BRW_HORIZONTAL_STRIDE_1);
      /* Source modifiers are ignored for extended math instructions on Gen6. */
-      if (intel->gen == 6) {
+      if (brw->gen == 6) {
         assert(!src.negate);
         assert(!src.abs);
      }
@@ -1826,20 +1820,16 @@ void brw_math2(struct brw_compile *p,
               struct brw_reg src0,
               struct brw_reg src1)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *insn = next_insn(p, BRW_OPCODE_MATH);
-   assert(intel->gen >= 6);
-   (void) intel;
    assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
-          (intel->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
+          (brw->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
    assert(src0.file == BRW_GENERAL_REGISTER_FILE);
    assert(src1.file == BRW_GENERAL_REGISTER_FILE);
    assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
-   if (intel->gen == 6) {
+   if (brw->gen == 6) {
       assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
       assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
    }
@@ -1855,7 +1845,7 @@ void brw_math2(struct brw_compile *p,
    }
    /* Source modifiers are ignored for extended math instructions on Gen6. */
-   if (intel->gen == 6) {
+   if (brw->gen == 6) {
       assert(!src0.negate);
       assert(!src0.abs);
       assert(!src1.negate);
@@ -1885,11 +1875,11 @@ void brw_oword_block_write_scratch(struct brw_compile *p,
                                   int num_regs,
                                   GLuint offset)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    uint32_t msg_control, msg_type;
    int mlen;
-   if (intel->gen >= 6)
+   if (brw->gen >= 6)
       offset /= 16;
    mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
@@ -1948,7 +1938,7 @@ void brw_oword_block_write_scratch(struct brw_compile *p,
       * protection.  Our use of DP writes is all about register
       * spilling within a thread.
       */
-      if (intel->gen >= 6) {
+      if (brw->gen >= 6) {
         dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
         send_commit_msg = 0;
      } else {
@@ -1957,13 +1947,13 @@ void brw_oword_block_write_scratch(struct brw_compile *p,
      }
      brw_set_dest(p, insn, dest);
-      if (intel->gen >= 6) {
+      if (brw->gen >= 6) {
         brw_set_src0(p, insn, mrf);
      } else {
         brw_set_src0(p, insn, brw_null_reg());
      }
-      if (intel->gen >= 6)
+      if (brw->gen >= 6)
         msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
      else
         msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
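
The scratch accessors above divide the incoming byte offset by 16 on newer parts. A short sketch of that unit change (the helper is illustrative, not part of the patch):

/* Illustrative only: Gen6+ scratch block messages take their offset in
 * 16-byte owords, while earlier parts take a byte offset. */
static uint32_t
scratch_message_offset(int gen, uint32_t byte_offset)
{
   return gen >= 6 ? byte_offset / 16 : byte_offset;
}
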
@@ -1997,11 +1987,11 @@ brw_oword_block_read_scratch(struct brw_compile *p,
                             int num_regs,
                             GLuint offset)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    uint32_t msg_control;
    int rlen;
-   if (intel->gen >= 6)
+   if (brw->gen >= 6)
       offset /= 16;
    mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
@@ -2040,7 +2030,7 @@ brw_oword_block_read_scratch(struct brw_compile *p,
    insn->header.destreg__conditionalmod = mrf.nr;
    brw_set_dest(p, insn, dest);   /* UW? */
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       brw_set_src0(p, insn, mrf);
    } else {
       brw_set_src0(p, insn, brw_null_reg());
@@ -2069,10 +2059,10 @@ void brw_oword_block_read(struct brw_compile *p,
                          uint32_t offset,
                          uint32_t bind_table_index)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    /* On newer hardware, offset is in units of owords. */
-   if (intel->gen >= 6)
+   if (brw->gen >= 6)
       offset /= 16;
    mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
@@ -2098,7 +2088,7 @@ void brw_oword_block_read(struct brw_compile *p,
    dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
    brw_set_dest(p, insn, dest);
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       brw_set_src0(p, insn, mrf);
    } else {
       brw_set_src0(p, insn, brw_null_reg());
@@ -2129,7 +2119,7 @@ void brw_fb_WRITE(struct brw_compile *p,
                  bool eot,
                  bool header_present)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *insn;
    GLuint msg_type;
    struct brw_reg dest;
@@ -2139,7 +2129,7 @@ void brw_fb_WRITE(struct brw_compile *p,
    else
       dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       insn = next_insn(p, BRW_OPCODE_SENDC);
    } else {
       insn = next_insn(p, BRW_OPCODE_SEND);
@@ -2148,7 +2138,7 @@ void brw_fb_WRITE(struct brw_compile *p,
    insn->header.predicate_control = 0;
    insn->header.compression_control = BRW_COMPRESSION_NONE;
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       /* headerless version, just submit color payload */
       src0 = brw_message_reg(msg_reg_nr);
@@ -2193,7 +2183,7 @@ void brw_SAMPLE(struct brw_compile *p,
                GLuint simd_mode,
                GLuint return_format)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *insn;
    gen6_resolve_implied_move(p, &src0, msg_reg_nr);
@@ -2201,7 +2191,7 @@ void brw_SAMPLE(struct brw_compile *p,
    insn = next_insn(p, BRW_OPCODE_SEND);
    insn->header.predicate_control = 0; /* XXX */
    insn->header.compression_control = BRW_COMPRESSION_NONE;
-   if (intel->gen < 6)
+   if (brw->gen < 6)
       insn->header.destreg__conditionalmod = msg_reg_nr;
    brw_set_dest(p, insn, dest);
@@ -2234,12 +2224,12 @@ void brw_urb_WRITE(struct brw_compile *p,
                   GLuint offset,
                   GLuint swizzle)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *insn;
    gen6_resolve_implied_move(p, &src0, msg_reg_nr);
-   if (intel->gen == 7) {
+   if (brw->gen == 7) {
       /* Enable Channel Masks in the URB_WRITE_HWORD message header */
       brw_push_insn_state(p);
       brw_set_access_mode(p, BRW_ALIGN_1);
@@ -2259,7 +2249,7 @@ void brw_urb_WRITE(struct brw_compile *p,
    brw_set_src0(p, insn, src0);
    brw_set_src1(p, insn, brw_imm_d(0));
-   if (intel->gen < 6)
+   if (brw->gen < 6)
       insn->header.destreg__conditionalmod = msg_reg_nr;
    brw_set_urb_message(p,
@@ -2313,7 +2303,7 @@ brw_find_next_block_end(struct brw_compile *p, int start)
 static int
 brw_find_loop_end(struct brw_compile *p, int start)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    int ip;
    int scale = 8;
    void *store = p->store;
@@ -2325,7 +2315,7 @@ brw_find_loop_end(struct brw_compile *p, int start)
       struct brw_instruction *insn = store + ip;
       if (insn->header.opcode == BRW_OPCODE_WHILE) {
-         int jip = intel->gen == 6 ? insn->bits1.branch_gen6.jump_count
+         int jip = brw->gen == 6 ? insn->bits1.branch_gen6.jump_count
                                    : insn->bits3.break_cont.jip;
         if (ip + jip * scale <= start)
            return ip;
@@ -2341,12 +2331,12 @@ brw_find_loop_end(struct brw_compile *p, int start)
 void
 brw_set_uip_jip(struct brw_compile *p)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    int ip;
    int scale = 8;
    void *store = p->store;
-   if (intel->gen < 6)
+   if (brw->gen < 6)
       return;
    for (ip = 0; ip < p->next_insn_offset; ip = next_ip(p, ip)) {
@@ -2368,7 +2358,7 @@ brw_set_uip_jip(struct brw_compile *p)
         /* Gen7 UIP points to WHILE; Gen6 points just after it */
         insn->bits3.break_cont.uip =
            (brw_find_loop_end(p, ip) - ip +
-             (intel->gen == 6 ? 16 : 0)) / scale;
+             (brw->gen == 6 ? 16 : 0)) / scale;
         break;
      case BRW_OPCODE_CONTINUE:
         assert(block_end_ip != 0);
@@ -2419,7 +2409,7 @@ void brw_ff_sync(struct brw_compile *p,
                 GLuint response_length,
                 bool eot)
 {
-   struct intel_context *intel = &p->brw->intel;
+   struct brw_context *brw = p->brw;
    struct brw_instruction *insn;
    gen6_resolve_implied_move(p, &src0, msg_reg_nr);
@@ -2429,7 +2419,7 @@ void brw_ff_sync(struct brw_compile *p,
    brw_set_src0(p, insn, src0);
    brw_set_src1(p, insn, brw_imm_d(0));
-   if (intel->gen < 6)
+   if (brw->gen < 6)
       insn->header.destreg__conditionalmod = msg_reg_nr;
    brw_set_ff_sync_message(p,
@@ -2499,8 +2489,7 @@ void brw_shader_time_add(struct brw_compile *p,
                         uint32_t surf_index)
 {
    struct brw_context *brw = p->brw;
-   struct intel_context *intel = &p->brw->intel;
-   assert(intel->gen >= 7);
+   assert(brw->gen >= 7);
    brw_push_insn_state(p);
    brw_set_access_mode(p, BRW_ALIGN_1);
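
Every hunk in this file follows the same mechanical pattern. Sketched below with a placeholder function name (emit_foo is not a real function): code that previously materialized an intel_context just to read ->gen now reads the field straight off brw_context.

/* Before this commit (emit_foo is a placeholder): */
void emit_foo(struct brw_compile *p)
{
   struct intel_context *intel = &p->brw->intel;
   if (intel->gen >= 6) { /* ... */ }
}

/* After this commit: */
void emit_foo(struct brw_compile *p)
{
   struct brw_context *brw = p->brw;
   if (brw->gen >= 6) { /* ... */ }
}
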


@@ -193,7 +193,7 @@ fs_visitor::IF(uint32_t predicate)
 fs_inst *
 fs_visitor::IF(fs_reg src0, fs_reg src1, uint32_t condition)
 {
-   assert(intel->gen >= 6);
+   assert(brw->gen >= 6);
    fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_IF,
                                        reg_null_d, src0, src1);
    inst->conditional_mod = condition;
@@ -222,7 +222,7 @@ fs_visitor::CMP(fs_reg dst, fs_reg src0, fs_reg src1, uint32_t condition)
    * mostly work out for float-interpreted-as-int since our comparisons are
    * for >0, =0, <0.
    */
-   if (intel->gen == 4) {
+   if (brw->gen == 4) {
       dst.type = src0.type;
       if (dst.file == HW_REG)
         dst.fixed_hw_reg.type = dst.type;
@@ -261,7 +261,7 @@ fs_visitor::VARYING_PULL_CONSTANT_LOAD(fs_reg dst, fs_reg surf_index,
                                             varying_offset, const_offset & ~3));
    int scale = 1;
-   if (intel->gen == 4 && dispatch_width == 8) {
+   if (brw->gen == 4 && dispatch_width == 8) {
       /* Pre-gen5, we can either use a SIMD8 message that requires (header,
       * u, v, r) as parameters, or we can just use the SIMD16 message
       * consisting of (header, u).  We choose the second, at the cost of a
@@ -271,7 +271,7 @@ fs_visitor::VARYING_PULL_CONSTANT_LOAD(fs_reg dst, fs_reg surf_index,
    }
    enum opcode op;
-   if (intel->gen >= 7)
+   if (brw->gen >= 7)
       op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7;
    else
       op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD;
@@ -280,10 +280,10 @@ fs_visitor::VARYING_PULL_CONSTANT_LOAD(fs_reg dst, fs_reg surf_index,
    inst->regs_written = 4 * scale;
    instructions.push_tail(inst);
-   if (intel->gen < 7) {
+   if (brw->gen < 7) {
       inst->base_mrf = 13;
       inst->header_present = true;
-      if (intel->gen == 4)
+      if (brw->gen == 4)
         inst->mlen = 3;
      else
         inst->mlen = 1 + dispatch_width / 8;
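
The message sizing above condenses to a small table; restated as an illustrative helper (values taken from the surrounding code, the function itself is not in the tree):

/* Varying-offset pull-constant load message length: Gen7+ sends from
 * the GRF with no header; Gen4 SIMD8 borrows the SIMD16 message, so it
 * sends a header plus two registers of u coordinates. */
static int
pull_load_mlen(int gen, int dispatch_width)
{
   if (gen >= 7)
      return 0;
   if (gen == 4)
      return 3;
   return 1 + dispatch_width / 8;
}
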
@@ -357,7 +357,7 @@ fs_inst::is_send_from_grf()
 bool
 fs_visitor::can_do_source_mods(fs_inst *inst)
 {
-   if (intel->gen == 6 && inst->is_math())
+   if (brw->gen == 6 && inst->is_math())
       return false;
    if (inst->is_send_from_grf())
@@ -493,7 +493,7 @@ fs_visitor::type_size(const struct glsl_type *type)
 fs_reg
 fs_visitor::get_timestamp()
 {
-   assert(intel->gen >= 7);
+   assert(brw->gen >= 7);
    fs_reg ts = fs_reg(retype(brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                          BRW_ARF_TIMESTAMP,
@@ -930,7 +930,7 @@ fs_visitor::emit_fragcoord_interpolation(ir_variable *ir)
       wpos.reg_offset++;
    /* gl_FragCoord.z */
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       emit(MOV(wpos, fs_reg(brw_vec8_grf(c->source_depth_reg, 0))));
    } else {
       emit(FS_OPCODE_LINTERP, wpos,
@@ -952,7 +952,7 @@ fs_visitor::emit_linterp(const fs_reg &attr, const fs_reg &interp,
                         bool is_centroid)
 {
    brw_wm_barycentric_interp_mode barycoord_mode;
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       if (is_centroid) {
         if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
            barycoord_mode = BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
@@ -1048,7 +1048,7 @@ fs_visitor::emit_general_interpolation(ir_variable *ir)
               inst->predicate = BRW_PREDICATE_NORMAL;
               inst->predicate_inverse = true;
            }
-            if (intel->gen < 6) {
+            if (brw->gen < 6) {
               emit(BRW_OPCODE_MUL, attr, attr, this->pixel_w);
            }
            attr.reg_offset++;
@@ -1068,7 +1068,7 @@ fs_visitor::emit_frontfacing_interpolation(ir_variable *ir)
    fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
    /* The frontfacing comes in as a bit in the thread payload. */
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       emit(BRW_OPCODE_ASR, *reg,
           fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
           fs_reg(15));
@@ -1097,14 +1097,14 @@ fs_visitor::fix_math_operand(fs_reg src)
    * The hardware ignores source modifiers (negate and abs) on math
    * instructions, so we also move to a temp to set those up.
    */
-   if (intel->gen == 6 && src.file != UNIFORM && src.file != IMM &&
+   if (brw->gen == 6 && src.file != UNIFORM && src.file != IMM &&
       !src.abs && !src.negate)
       return src;
    /* Gen7 relaxes most of the above restrictions, but still can't use IMM
    * operands to math
    */
-   if (intel->gen >= 7 && src.file != IMM)
+   if (brw->gen >= 7 && src.file != IMM)
       return src;
    fs_reg expanded = fs_reg(this, glsl_type::float_type);
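
The two early returns above encode the per-generation source restrictions for extended math. Restated as a single predicate (a hypothetical helper, not code from this patch):

/* Does a math source need to be copied to a temporary first?  Gen6
 * rejects UNIFORM and IMM files and any negate/abs modifier; Gen7+
 * only rejects immediates. */
static bool
math_operand_needs_copy(int gen, bool is_uniform, bool is_imm,
                        bool has_negate_or_abs)
{
   if (gen >= 7)
      return is_imm;
   return is_uniform || is_imm || has_negate_or_abs;
}
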
@@ -1138,12 +1138,12 @@ fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src)
    * Gen 6 hardware ignores source modifiers (negate and abs) on math
    * instructions, so we also move to a temp to set those up.
    */
-   if (intel->gen >= 6)
+   if (brw->gen >= 6)
       src = fix_math_operand(src);
    fs_inst *inst = emit(opcode, dst, src);
-   if (intel->gen < 6) {
+   if (brw->gen < 6) {
       inst->base_mrf = 2;
       inst->mlen = dispatch_width / 8;
    }
@@ -1160,7 +1160,7 @@ fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1)
    switch (opcode) {
    case SHADER_OPCODE_INT_QUOTIENT:
    case SHADER_OPCODE_INT_REMAINDER:
-      if (intel->gen >= 7 && dispatch_width == 16)
+      if (brw->gen >= 7 && dispatch_width == 16)
         fail("16-wide INTDIV unsupported\n");
      break;
    case SHADER_OPCODE_POW:
@@ -1170,7 +1170,7 @@ fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1)
       return NULL;
    }
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       src0 = fix_math_operand(src0);
       src1 = fix_math_operand(src1);
@@ -1235,7 +1235,7 @@ fs_visitor::calculate_urb_setup()
    int urb_next = 0;
    /* Figure out where each of the incoming setup attributes lands. */
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
         if (fp->Base.InputsRead & BITFIELD64_BIT(i)) {
            urb_setup[i] = urb_next++;
@@ -2248,7 +2248,7 @@ fs_visitor::compute_to_mrf()
         if (scan_inst->mlen)
            break;
-         if (intel->gen == 6) {
+         if (brw->gen == 6) {
            /* gen6 math instructions must have the destination be
             * GRF, so no compute-to-MRF for them.
             */
@@ -2599,7 +2599,7 @@ fs_visitor::insert_gen4_post_send_dependency_workarounds(fs_inst *inst)
 void
 fs_visitor::insert_gen4_send_dependency_workarounds()
 {
-   if (intel->gen != 4 || brw->is_g4x)
+   if (brw->gen != 4 || brw->is_g4x)
       return;
    /* Note that we're done with register allocation, so GRF fs_regs always
@@ -2641,7 +2641,7 @@ fs_visitor::lower_uniform_pull_constant_loads()
      if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
         continue;
-      if (intel->gen >= 7) {
+      if (brw->gen >= 7) {
         /* The offset arg before was a vec4-aligned byte offset. We need to
          * turn it into a dword offset.
          */
@@ -2701,7 +2701,7 @@ fs_visitor::dump_instruction(backend_instruction *be_inst)
    if (inst->conditional_mod) {
       printf(".cmod");
       if (!inst->predicate &&
-          (intel->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
+          (brw->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
                             inst->opcode != BRW_OPCODE_IF &&
                             inst->opcode != BRW_OPCODE_WHILE))) {
         printf(".f0.%d\n", inst->flag_subreg);
@@ -2826,7 +2826,7 @@ fs_visitor::setup_payload_gen6()
      (fp->Base.InputsRead & (1 << VARYING_SLOT_POS)) != 0;
    unsigned barycentric_interp_modes = c->prog_data.barycentric_interp_modes;
-   assert(intel->gen >= 6);
+   assert(brw->gen >= 6);
    /* R0-1: masks, pixel X/Y coordinates. */
    c->nr_payload_regs = 2;
@@ -2882,7 +2882,7 @@ fs_visitor::run()
    sanity_param_count = fp->Base.Parameters->NumParameters;
    uint32_t orig_nr_params = c->prog_data.nr_params;
-   if (intel->gen >= 6)
+   if (brw->gen >= 6)
       setup_payload_gen6();
    else
       setup_payload_gen4();
@@ -2894,7 +2894,7 @@ fs_visitor::run()
         emit_shader_time_begin();
      calculate_urb_setup();
-      if (intel->gen < 6)
+      if (brw->gen < 6)
         emit_interpolation_setup_gen4();
      else
         emit_interpolation_setup_gen6();
@@ -3016,7 +3016,6 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c,
               struct gl_shader_program *prog,
               unsigned *final_assembly_size)
 {
-   struct intel_context *intel = &brw->intel;
    bool start_busy = false;
    float start_time = 0;
@@ -3060,7 +3059,7 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c,
    exec_list *simd16_instructions = NULL;
    fs_visitor v2(brw, c, prog, fp, 16);
    bool no16 = INTEL_DEBUG & DEBUG_NO16;
-   if (intel->gen >= 5 && c->prog_data.nr_pull_params == 0 && likely(!no16)) {
+   if (brw->gen >= 5 && c->prog_data.nr_pull_params == 0 && likely(!no16)) {
       v2.import_uniforms(&v);
       if (!v2.run()) {
         perf_debug("16-wide shader failed to compile, falling back to "
@@ -3095,7 +3094,6 @@ bool
 brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = &brw->intel;
    struct brw_wm_prog_key key;
    if (!prog->_LinkedShaders[MESA_SHADER_FRAGMENT])
@@ -3108,7 +3106,7 @@ brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
    memset(&key, 0, sizeof(key));
-   if (intel->gen < 6) {
+   if (brw->gen < 6) {
       if (fp->UsesKill)
         key.iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT;
@@ -3120,14 +3118,14 @@ brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
         key.iz_lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
    }
-   if (intel->gen < 6)
+   if (brw->gen < 6)
       key.input_slots_valid |= BITFIELD64_BIT(VARYING_SLOT_POS);
    for (int i = 0; i < VARYING_SLOT_MAX; i++) {
      if (!(fp->Base.InputsRead & BITFIELD64_BIT(i)))
         continue;
-      if (intel->gen < 6) {
+      if (brw->gen < 6) {
         if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
            key.input_slots_valid |= BITFIELD64_BIT(i);
      }


@@ -62,7 +62,7 @@ fs_generator::~fs_generator()
 void
 fs_generator::patch_discard_jumps_to_fb_writes()
 {
-   if (intel->gen < 6 || this->discard_halt_patches.is_empty())
+   if (brw->gen < 6 || this->discard_halt_patches.is_empty())
       return;
    /* There is a somewhat strange undocumented requirement of using
@@ -111,7 +111,7 @@ fs_generator::generate_fb_write(fs_inst *inst)
    if (fp->UsesKill) {
       struct brw_reg pixel_mask;
-      if (intel->gen >= 6)
+      if (brw->gen >= 6)
         pixel_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
      else
         pixel_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
@@ -120,7 +120,7 @@ fs_generator::generate_fb_write(fs_inst *inst)
    }
    if (inst->header_present) {
-      if (intel->gen >= 6) {
+      if (brw->gen >= 6) {
         brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
         brw_MOV(p,
                 retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD),
@@ -222,7 +222,7 @@ fs_generator::generate_linterp(fs_inst *inst,
    if (brw->has_pln &&
       delta_y.nr == delta_x.nr + 1 &&
-       (intel->gen >= 6 || (delta_x.nr & 1) == 0)) {
+       (brw->gen >= 6 || (delta_x.nr & 1) == 0)) {
       brw_PLN(p, dst, interp, delta_x);
    } else {
       brw_LINE(p, brw_null_reg(), interp, delta_x);
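
The PLN condition above reads more easily as one predicate (illustrative helper name, same logic as the code):

/* PLN needs the barycentric deltas in adjacent registers, and before
 * Gen6 the pair must also start on an even register number. */
static bool
can_use_pln(int gen, bool has_pln, unsigned delta_x_nr, unsigned delta_y_nr)
{
   return has_pln &&
          delta_y_nr == delta_x_nr + 1 &&
          (gen >= 6 || (delta_x_nr & 1) == 0);
}
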
@@ -374,7 +374,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
    if (dispatch_width == 16)
       simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
-   if (intel->gen >= 5) {
+   if (brw->gen >= 5) {
       switch (inst->opcode) {
       case SHADER_OPCODE_TEX:
         if (inst->shadow_compare) {
@@ -413,7 +413,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MS:
-         if (intel->gen >= 7)
+         if (brw->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
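
The multisample texelFetch case above picks the sampler message per generation; a sketch with an assumed helper name (the constants are the ones used in the hunk):

/* Gen7 gained a dedicated LD2DMS message for multisample texelFetch;
 * Gen5/6 fall back to the ordinary LD message. */
static uint32_t
txf_ms_msg_type(int gen)
{
   return gen >= 7 ? GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS
                   : GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
}
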
@@ -596,7 +596,7 @@ fs_generator::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src
 void
 fs_generator::generate_discard_jump(fs_inst *inst)
 {
-   assert(intel->gen >= 6);
+   assert(brw->gen >= 6);
    /* This HALT will be patched up at FB write time to point UIP at the end of
     * the program, and at brw_uip_jip() JIP will be set to the end of the
@@ -697,7 +697,7 @@ fs_generator::generate_varying_pull_constant_load(fs_inst *inst,
                                                  struct brw_reg index,
                                                  struct brw_reg offset)
 {
-   assert(intel->gen < 7); /* Should use the gen7 variant. */
+   assert(brw->gen < 7); /* Should use the gen7 variant. */
    assert(inst->header_present);
    assert(inst->mlen);
@@ -714,7 +714,7 @@ fs_generator::generate_varying_pull_constant_load(fs_inst *inst,
       rlen = 4;
    }
-   if (intel->gen >= 5)
+   if (brw->gen >= 5)
       msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
    else {
       /* We always use the SIMD16 message so that we only have to load U, and
@@ -738,7 +738,7 @@ fs_generator::generate_varying_pull_constant_load(fs_inst *inst,
    send->header.compression_control = BRW_COMPRESSION_NONE;
    brw_set_dest(p, send, dst);
    brw_set_src0(p, send, header);
-   if (intel->gen < 6)
+   if (brw->gen < 6)
       send->header.destreg__conditionalmod = inst->base_mrf;
    /* Our surface is set up as floats, regardless of what actual data is
@@ -762,7 +762,7 @@ fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst,
                                                       struct brw_reg index,
                                                       struct brw_reg offset)
 {
-   assert(intel->gen >= 7);
+   assert(brw->gen >= 7);
    /* Varying-offset pull constant loads are treated as a normal expression on
     * gen7, so the fact that it's a send message is hidden at the IR level.
     */
@@ -810,7 +810,7 @@ fs_generator::generate_mov_dispatch_to_flags(fs_inst *inst)
    struct brw_reg flags = brw_flag_reg(0, inst->flag_subreg);
    struct brw_reg dispatch_mask;
-   if (intel->gen >= 6)
+   if (brw->gen >= 6)
       dispatch_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
    else
       dispatch_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
@@ -946,7 +946,7 @@ fs_generator::generate_pack_half_2x16_split(fs_inst *inst,
                                            struct brw_reg x,
                                            struct brw_reg y)
 {
-   assert(intel->gen >= 7);
+   assert(brw->gen >= 7);
    assert(dst.type == BRW_REGISTER_TYPE_UD);
    assert(x.type == BRW_REGISTER_TYPE_F);
    assert(y.type == BRW_REGISTER_TYPE_F);
@@ -984,7 +984,7 @@ fs_generator::generate_unpack_half_2x16_split(fs_inst *inst,
                                              struct brw_reg dst,
                                              struct brw_reg src)
 {
-   assert(intel->gen >= 7);
+   assert(brw->gen >= 7);
    assert(dst.type == BRW_REGISTER_TYPE_F);
    assert(src.type == BRW_REGISTER_TYPE_UD);
@@ -1014,7 +1014,7 @@ fs_generator::generate_shader_time_add(fs_inst *inst,
                                       struct brw_reg offset,
                                       struct brw_reg value)
 {
-   assert(intel->gen >= 7);
+   assert(brw->gen >= 7);
    brw_push_insn_state(p);
    brw_set_mask_control(p, true);
@@ -1281,7 +1281,7 @@ fs_generator::generate_code(exec_list *instructions)
      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
-            assert(intel->gen == 6);
+            assert(brw->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            brw_IF(p, dispatch_width == 16 ? BRW_EXECUTE_16 : BRW_EXECUTE_8);
@@ -1305,7 +1305,7 @@ fs_generator::generate_code(exec_list *instructions)
         break;
      case BRW_OPCODE_CONTINUE:
         /* FINISHME: We need to write the loop instruction support still. */
-         if (intel->gen >= 6)
+         if (brw->gen >= 6)
            gen6_CONT(p);
         else
            brw_CONT(p);
@@ -1323,11 +1323,11 @@ fs_generator::generate_code(exec_list *instructions)
      case SHADER_OPCODE_LOG2:
      case SHADER_OPCODE_SIN:
      case SHADER_OPCODE_COS:
-         if (intel->gen >= 7) {
+         if (brw->gen >= 7) {
            generate_math1_gen7(inst, dst, src[0]);
-         } else if (intel->gen == 6) {
+         } else if (brw->gen == 6) {
            generate_math1_gen6(inst, dst, src[0]);
-         } else if (intel->gen == 5 || brw->is_g4x) {
+         } else if (brw->gen == 5 || brw->is_g4x) {
            generate_math_g45(inst, dst, src[0]);
         } else {
            generate_math_gen4(inst, dst, src[0]);
@@ -1336,9 +1336,9 @@ fs_generator::generate_code(exec_list *instructions)
      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
      case SHADER_OPCODE_POW:
-         if (intel->gen >= 7) {
+         if (brw->gen >= 7) {
            generate_math2_gen7(inst, dst, src[0], src[1]);
-         } else if (intel->gen == 6) {
+         } else if (brw->gen == 6) {
            generate_math2_gen6(inst, dst, src[0], src[1]);
         } else {
            generate_math_gen4(inst, dst, src[0]);


@@ -500,9 +500,9 @@ fs_visitor::emit_fragment_program_code()
    }
    fs_inst *inst;
-   if (intel->gen >= 7) {
+   if (brw->gen >= 7) {
       inst = emit_texture_gen7(ir, dst, coordinate, shadow_c, lod, dpdy, sample_index);
-   } else if (intel->gen >= 5) {
+   } else if (brw->gen >= 5) {
       inst = emit_texture_gen5(ir, dst, coordinate, shadow_c, lod, dpdy, sample_index);
    } else {
       inst = emit_texture_gen4(ir, dst, coordinate, shadow_c, lod, dpdy);


@@ -73,7 +73,6 @@ fs_visitor::assign_regs_trivial()
 static void
 brw_alloc_reg_set(struct brw_context *brw, int reg_width)
 {
-   struct intel_context *intel = &brw->intel;
    int base_reg_count = BRW_MAX_GRF / reg_width;
    int index = reg_width - 1;
@@ -107,7 +106,7 @@ brw_alloc_reg_set(struct brw_context *brw, int reg_width)
    uint8_t *ra_reg_to_grf = ralloc_array(brw, uint8_t, ra_reg_count);
    struct ra_regs *regs = ra_alloc_reg_set(brw, ra_reg_count);
-   if (intel->gen >= 6)
+   if (brw->gen >= 6)
       ra_set_allocate_round_robin(regs);
    int *classes = ralloc_array(brw, int, class_count);
    int aligned_pairs_class = -1;
@@ -147,7 +146,7 @@ brw_alloc_reg_set(struct brw_context *brw, int reg_width)
    /* Add a special class for aligned pairs, which we'll put delta_x/y
     * in on gen5 so that we can do PLN.
     */
-   if (brw->has_pln && reg_width == 1 && intel->gen < 6) {
+   if (brw->has_pln && reg_width == 1 && brw->gen < 6) {
       aligned_pairs_class = ra_alloc_reg_class(regs);
       for (int i = 0; i < pairs_reg_count; i++) {
@@ -285,7 +284,7 @@ fs_visitor::setup_payload_interference(struct ra_graph *g,
       * two in the arguments (1 node).  Pre-gen6, the deltas are computed
       * in normal VGRFs.
       */
-      if (intel->gen >= 6) {
+      if (brw->gen >= 6) {
         int delta_x_arg = 0;
         if (inst->src[delta_x_arg].file == HW_REG &&
             inst->src[delta_x_arg].fixed_hw_reg.file ==
@@ -406,7 +405,7 @@ fs_visitor::assign_regs()
    int first_payload_node = node_count;
    node_count += payload_node_count;
    int first_mrf_hack_node = node_count;
-   if (intel->gen >= 7)
+   if (brw->gen >= 7)
       node_count += BRW_MAX_GRF - GEN7_MRF_HACK_START;
    struct ra_graph *g = ra_alloc_interference_graph(brw->wm.reg_sets[rsi].regs,
                                                    node_count);
@@ -448,7 +447,7 @@ fs_visitor::assign_regs()
    }
    setup_payload_interference(g, payload_node_count, first_payload_node);
-   if (intel->gen >= 7)
+   if (brw->gen >= 7)
       setup_mrf_hack_interference(g, first_mrf_hack_node);
    if (!ra_allocate_no_spills(g)) {


@@ -200,7 +200,7 @@ fs_visitor::visit(ir_dereference_array *ir)
 void
 fs_visitor::emit_lrp(fs_reg dst, fs_reg x, fs_reg y, fs_reg a)
 {
-   if (intel->gen < 6 ||
+   if (brw->gen < 6 ||
       !x.is_valid_3src() ||
       !y.is_valid_3src() ||
       !a.is_valid_3src()) {
@@ -230,7 +230,7 @@ fs_visitor::emit_minmax(uint32_t conditionalmod, fs_reg dst,
 {
    fs_inst *inst;
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
       inst->conditional_mod = conditionalmod;
    } else {
@@ -280,7 +280,7 @@ bool
 fs_visitor::try_emit_mad(ir_expression *ir, int mul_arg)
 {
    /* 3-src instructions were introduced in gen6. */
-   if (intel->gen < 6)
+   if (brw->gen < 6)
       return false;
    /* MAD can only handle floating-point data. */
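
try_emit_mad() gates on hardware support before anything else; reduced to its essentials (an illustrative sketch, not code from the tree):

/* MAD is a 3-source instruction: 3-source encodings exist only on
 * Gen6+, and they only handle floating-point data. */
static bool
mad_is_available(int gen, bool operands_are_float)
{
   return gen >= 6 && operands_are_float;
}
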
@ -429,7 +429,7 @@ fs_visitor::visit(ir_expression *ir)
* FINISHME: Emit just the MUL if we know an operand is small * FINISHME: Emit just the MUL if we know an operand is small
* enough. * enough.
*/ */
if (intel->gen >= 7 && dispatch_width == 16) if (brw->gen >= 7 && dispatch_width == 16)
fail("16-wide explicit accumulator operands unsupported\n"); fail("16-wide explicit accumulator operands unsupported\n");
struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D); struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);
@ -1321,8 +1321,8 @@ fs_visitor::rescale_texcoord(ir_texture *ir, fs_reg coordinate,
* tracking to get the scaling factor. * tracking to get the scaling factor.
*/ */
if (is_rect && if (is_rect &&
(intel->gen < 6 || (brw->gen < 6 ||
(intel->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) || (brw->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) ||
c->key.tex.gl_clamp_mask[1] & (1 << sampler))))) { c->key.tex.gl_clamp_mask[1] & (1 << sampler))))) {
struct gl_program_parameter_list *params = fp->Base.Parameters; struct gl_program_parameter_list *params = fp->Base.Parameters;
int tokens[STATE_LENGTH] = { int tokens[STATE_LENGTH] = {
@ -1353,7 +1353,7 @@ fs_visitor::rescale_texcoord(ir_texture *ir, fs_reg coordinate,
* texture coordinates. We use the program parameter state * texture coordinates. We use the program parameter state
* tracking to get the scaling factor. * tracking to get the scaling factor.
*/ */
if (intel->gen < 6 && is_rect) { if (brw->gen < 6 && is_rect) {
fs_reg dst = fs_reg(this, ir->coordinate->type); fs_reg dst = fs_reg(this, ir->coordinate->type);
fs_reg src = coordinate; fs_reg src = coordinate;
coordinate = dst; coordinate = dst;
@ -1478,10 +1478,10 @@ fs_visitor::visit(ir_texture *ir)
*/ */
fs_reg dst = fs_reg(this, glsl_type::get_instance(ir->type->base_type, 4, 1)); fs_reg dst = fs_reg(this, glsl_type::get_instance(ir->type->base_type, 4, 1));
if (intel->gen >= 7) { if (brw->gen >= 7) {
inst = emit_texture_gen7(ir, dst, coordinate, shadow_comparitor, inst = emit_texture_gen7(ir, dst, coordinate, shadow_comparitor,
lod, lod2, sample_index); lod, lod2, sample_index);
} else if (intel->gen >= 5) { } else if (brw->gen >= 5) {
inst = emit_texture_gen5(ir, dst, coordinate, shadow_comparitor, inst = emit_texture_gen5(ir, dst, coordinate, shadow_comparitor,
lod, lod2, sample_index); lod, lod2, sample_index);
} else { } else {
@ -1607,7 +1607,7 @@ fs_visitor::visit(ir_discard *ir)
cmp->predicate = BRW_PREDICATE_NORMAL; cmp->predicate = BRW_PREDICATE_NORMAL;
cmp->flag_subreg = 1; cmp->flag_subreg = 1;
if (intel->gen >= 6) { if (brw->gen >= 6) {
/* For performance, after a discard, jump to the end of the shader. /* For performance, after a discard, jump to the end of the shader.
* However, many people will do foliage by discarding based on a * However, many people will do foliage by discarding based on a
* texture's alpha mask, and then continue on to texture with the * texture's alpha mask, and then continue on to texture with the
@ -1722,7 +1722,7 @@ fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
goto out; goto out;
case ir_unop_f2b: case ir_unop_f2b:
if (intel->gen >= 6) { if (brw->gen >= 6) {
emit(CMP(reg_null_d, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ)); emit(CMP(reg_null_d, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
} else { } else {
inst = emit(MOV(reg_null_f, op[0])); inst = emit(MOV(reg_null_f, op[0]));
@ -1731,7 +1731,7 @@ fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
break; break;
case ir_unop_i2b: case ir_unop_i2b:
if (intel->gen >= 6) { if (brw->gen >= 6) {
emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_NZ)); emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
} else { } else {
inst = emit(MOV(reg_null_d, op[0])); inst = emit(MOV(reg_null_d, op[0]));
@ -1841,7 +1841,7 @@ fs_visitor::emit_if_gen6(ir_if *ir)
void void
fs_visitor::visit(ir_if *ir) fs_visitor::visit(ir_if *ir)
{ {
if (intel->gen < 6 && dispatch_width == 16) { if (brw->gen < 6 && dispatch_width == 16) {
fail("Can't support (non-uniform) control flow on 16-wide\n"); fail("Can't support (non-uniform) control flow on 16-wide\n");
} }
@ -1850,7 +1850,7 @@ fs_visitor::visit(ir_if *ir)
*/ */
this->base_ir = ir->condition; this->base_ir = ir->condition;
if (intel->gen == 6) { if (brw->gen == 6) {
emit_if_gen6(ir); emit_if_gen6(ir);
} else { } else {
emit_bool_to_cond_code(ir->condition); emit_bool_to_cond_code(ir->condition);
@ -1884,7 +1884,7 @@ fs_visitor::visit(ir_loop *ir)
{ {
fs_reg counter = reg_undef; fs_reg counter = reg_undef;
if (intel->gen < 6 && dispatch_width == 16) { if (brw->gen < 6 && dispatch_width == 16) {
fail("Can't support (non-uniform) control flow on 16-wide\n"); fail("Can't support (non-uniform) control flow on 16-wide\n");
} }
@ -2158,7 +2158,7 @@ fs_visitor::emit_color_write(int target, int index, int first_color_mrf)
color.reg_offset += index; color.reg_offset += index;
if (dispatch_width == 8 || intel->gen >= 6) { if (dispatch_width == 8 || brw->gen >= 6) {
/* SIMD8 write looks like: /* SIMD8 write looks like:
* m + 0: r0 * m + 0: r0
* m + 1: r1 * m + 1: r1
@ -2244,7 +2244,7 @@ fs_visitor::emit_fb_writes()
* dispatched. This field is only required for the end-of- * dispatched. This field is only required for the end-of-
* thread message and on all dual-source messages." * thread message and on all dual-source messages."
*/ */
if (intel->gen >= 6 && if (brw->gen >= 6 &&
!this->fp->UsesKill && !this->fp->UsesKill &&
!do_dual_src && !do_dual_src &&
c->key.nr_color_regions == 1) { c->key.nr_color_regions == 1) {
@ -2252,7 +2252,7 @@ fs_visitor::emit_fb_writes()
} }
if (header_present) { if (header_present) {
src0_alpha_to_render_target = intel->gen >= 6 && src0_alpha_to_render_target = brw->gen >= 6 &&
!do_dual_src && !do_dual_src &&
c->key.replicate_alpha; c->key.replicate_alpha;
/* m2, m3 header */ /* m2, m3 header */
@ -2275,7 +2275,7 @@ fs_visitor::emit_fb_writes()
nr += reg_width; nr += reg_width;
if (c->source_depth_to_render_target) { if (c->source_depth_to_render_target) {
if (intel->gen == 6 && dispatch_width == 16) { if (brw->gen == 6 && dispatch_width == 16) {
/* For outputting oDepth on gen6, SIMD8 writes have to be /* For outputting oDepth on gen6, SIMD8 writes have to be
* used. This would require 8-wide moves of each half to * used. This would require 8-wide moves of each half to
* message regs, kind of like pre-gen5 SIMD16 FB writes. * message regs, kind of like pre-gen5 SIMD16 FB writes.
@ -2449,7 +2449,7 @@ fs_visitor::fs_visitor(struct brw_context *brw,
memset(this->outputs, 0, sizeof(this->outputs)); memset(this->outputs, 0, sizeof(this->outputs));
memset(this->output_components, 0, sizeof(this->output_components)); memset(this->output_components, 0, sizeof(this->output_components));
this->first_non_payload_grf = 0; this->first_non_payload_grf = 0;
this->max_grf = intel->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF; this->max_grf = brw->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
this->current_annotation = NULL; this->current_annotation = NULL;
this->base_ir = NULL; this->base_ir = NULL;

View File

@ -48,7 +48,6 @@
static void compile_gs_prog( struct brw_context *brw, static void compile_gs_prog( struct brw_context *brw,
struct brw_gs_prog_key *key ) struct brw_gs_prog_key *key )
{ {
struct intel_context *intel = &brw->intel;
struct brw_gs_compile c; struct brw_gs_compile c;
const GLuint *program; const GLuint *program;
void *mem_ctx; void *mem_ctx;
@ -73,7 +72,7 @@ static void compile_gs_prog( struct brw_context *brw,
*/ */
brw_set_mask_control(&c.func, BRW_MASK_DISABLE); brw_set_mask_control(&c.func, BRW_MASK_DISABLE);
if (intel->gen >= 6) { if (brw->gen >= 6) {
unsigned num_verts; unsigned num_verts;
bool check_edge_flag; bool check_edge_flag;
/* On Sandybridge, we use the GS for implementing transform feedback /* On Sandybridge, we use the GS for implementing transform feedback
@ -139,7 +138,7 @@ static void compile_gs_prog( struct brw_context *brw,
printf("gs:\n"); printf("gs:\n");
for (i = 0; i < program_size / sizeof(struct brw_instruction); i++) for (i = 0; i < program_size / sizeof(struct brw_instruction); i++)
brw_disasm(stdout, &((struct brw_instruction *)program)[i], brw_disasm(stdout, &((struct brw_instruction *)program)[i],
intel->gen); brw->gen);
printf("\n"); printf("\n");
} }
@ -162,7 +161,6 @@ static void populate_key( struct brw_context *brw,
}; };
struct gl_context *ctx = &brw->intel.ctx; struct gl_context *ctx = &brw->intel.ctx;
struct intel_context *intel = &brw->intel;
memset(key, 0, sizeof(*key)); memset(key, 0, sizeof(*key));
@ -181,10 +179,10 @@ static void populate_key( struct brw_context *brw,
key->pv_first = true; key->pv_first = true;
} }
if (intel->gen >= 7) { if (brw->gen >= 7) {
/* On Gen7 and later, we don't use GS (yet). */ /* On Gen7 and later, we don't use GS (yet). */
key->need_gs_prog = false; key->need_gs_prog = false;
} else if (intel->gen == 6) { } else if (brw->gen == 6) {
/* On Gen6, GS is used for transform feedback. */ /* On Gen6, GS is used for transform feedback. */
/* BRW_NEW_TRANSFORM_FEEDBACK */ /* BRW_NEW_TRANSFORM_FEEDBACK */
if (_mesa_is_xfb_active_and_unpaused(ctx)) { if (_mesa_is_xfb_active_and_unpaused(ctx)) {

View File

@ -229,14 +229,14 @@ static void brw_gs_ff_sync(struct brw_gs_compile *c, int num_prim)
void brw_gs_quads( struct brw_gs_compile *c, struct brw_gs_prog_key *key ) void brw_gs_quads( struct brw_gs_compile *c, struct brw_gs_prog_key *key )
{ {
struct intel_context *intel = &c->func.brw->intel; struct brw_context *brw = c->func.brw;
brw_gs_alloc_regs(c, 4, false); brw_gs_alloc_regs(c, 4, false);
brw_gs_initialize_header(c); brw_gs_initialize_header(c);
/* Use polygons for correct edgeflag behaviour. Note that vertex 3 /* Use polygons for correct edgeflag behaviour. Note that vertex 3
* is the PV for quads, but vertex 0 for polygons: * is the PV for quads, but vertex 0 for polygons:
*/ */
if (intel->gen == 5) if (brw->gen == 5)
brw_gs_ff_sync(c, 1); brw_gs_ff_sync(c, 1);
brw_gs_overwrite_header_dw2( brw_gs_overwrite_header_dw2(
c, ((_3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT) c, ((_3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT)
@ -267,12 +267,12 @@ void brw_gs_quads( struct brw_gs_compile *c, struct brw_gs_prog_key *key )
void brw_gs_quad_strip( struct brw_gs_compile *c, struct brw_gs_prog_key *key ) void brw_gs_quad_strip( struct brw_gs_compile *c, struct brw_gs_prog_key *key )
{ {
struct intel_context *intel = &c->func.brw->intel; struct brw_context *brw = c->func.brw;
brw_gs_alloc_regs(c, 4, false); brw_gs_alloc_regs(c, 4, false);
brw_gs_initialize_header(c); brw_gs_initialize_header(c);
if (intel->gen == 5) if (brw->gen == 5)
brw_gs_ff_sync(c, 1); brw_gs_ff_sync(c, 1);
brw_gs_overwrite_header_dw2( brw_gs_overwrite_header_dw2(
c, ((_3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT) c, ((_3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT)
@ -303,12 +303,12 @@ void brw_gs_quad_strip( struct brw_gs_compile *c, struct brw_gs_prog_key *key )
void brw_gs_lines( struct brw_gs_compile *c ) void brw_gs_lines( struct brw_gs_compile *c )
{ {
struct intel_context *intel = &c->func.brw->intel; struct brw_context *brw = c->func.brw;
brw_gs_alloc_regs(c, 2, false); brw_gs_alloc_regs(c, 2, false);
brw_gs_initialize_header(c); brw_gs_initialize_header(c);
if (intel->gen == 5) if (brw->gen == 5)
brw_gs_ff_sync(c, 1); brw_gs_ff_sync(c, 1);
brw_gs_overwrite_header_dw2( brw_gs_overwrite_header_dw2(
c, ((_3DPRIM_LINESTRIP << URB_WRITE_PRIM_TYPE_SHIFT) c, ((_3DPRIM_LINESTRIP << URB_WRITE_PRIM_TYPE_SHIFT)

View File

@ -38,7 +38,6 @@
static void static void
brw_upload_gs_unit(struct brw_context *brw) brw_upload_gs_unit(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
struct brw_gs_unit_state *gs; struct brw_gs_unit_state *gs;
gs = brw_state_batch(brw, AUB_TRACE_GS_STATE, gs = brw_state_batch(brw, AUB_TRACE_GS_STATE,
@ -77,7 +76,7 @@ brw_upload_gs_unit(struct brw_context *brw)
gs->thread4.max_threads = 0; gs->thread4.max_threads = 0;
} }
if (intel->gen == 5) if (brw->gen == 5)
gs->thread4.rendering_enable = 1; gs->thread4.rendering_enable = 1;
if (unlikely(INTEL_DEBUG & DEBUG_STATS)) if (unlikely(INTEL_DEBUG & DEBUG_STATS))

View File

@ -168,8 +168,7 @@ bool
brw_lower_texture_gradients(struct brw_context *brw, brw_lower_texture_gradients(struct brw_context *brw,
struct exec_list *instructions) struct exec_list *instructions)
{ {
struct intel_context *intel = &brw->intel; bool has_sample_d_c = brw->gen >= 8 || brw->is_haswell;
bool has_sample_d_c = intel->gen >= 8 || brw->is_haswell;
lower_texture_grad_visitor v(has_sample_d_c); lower_texture_grad_visitor v(has_sample_d_c);
visit_list_elements(&v, instructions); visit_list_elements(&v, instructions);

View File

@ -141,9 +141,7 @@ const struct brw_tracked_state gen6_binding_table_pointers = {
*/ */
static void upload_pipelined_state_pointers(struct brw_context *brw ) static void upload_pipelined_state_pointers(struct brw_context *brw )
{ {
struct intel_context *intel = &brw->intel; if (brw->gen == 5) {
if (intel->gen == 5) {
/* Need to flush before changing clip max threads for errata. */ /* Need to flush before changing clip max threads for errata. */
BEGIN_BATCH(1); BEGIN_BATCH(1);
OUT_BATCH(MI_FLUSH); OUT_BATCH(MI_FLUSH);
@ -222,7 +220,7 @@ brw_depthbuffer_format(struct brw_context *brw)
case MESA_FORMAT_Z32_FLOAT: case MESA_FORMAT_Z32_FLOAT:
return BRW_DEPTHFORMAT_D32_FLOAT; return BRW_DEPTHFORMAT_D32_FLOAT;
case MESA_FORMAT_X8_Z24: case MESA_FORMAT_X8_Z24:
if (intel->gen >= 6) { if (brw->gen >= 6) {
return BRW_DEPTHFORMAT_D24_UNORM_X8_UINT; return BRW_DEPTHFORMAT_D24_UNORM_X8_UINT;
} else { } else {
/* Use D24_UNORM_S8, not D24_UNORM_X8. /* Use D24_UNORM_S8, not D24_UNORM_X8.
@ -385,7 +383,7 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw,
rebase_depth = true; rebase_depth = true;
/* We didn't even have intra-tile offsets before g45. */ /* We didn't even have intra-tile offsets before g45. */
if (intel->gen == 4 && !brw->is_g4x) { if (brw->gen == 4 && !brw->is_g4x) {
if (tile_x || tile_y) if (tile_x || tile_y)
rebase_depth = true; rebase_depth = true;
} }
@ -444,7 +442,7 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw,
if (stencil_tile_x & 7 || stencil_tile_y & 7) if (stencil_tile_x & 7 || stencil_tile_y & 7)
rebase_stencil = true; rebase_stencil = true;
if (intel->gen == 4 && !brw->is_g4x) { if (brw->gen == 4 && !brw->is_g4x) {
if (stencil_tile_x || stencil_tile_y) if (stencil_tile_x || stencil_tile_y)
rebase_stencil = true; rebase_stencil = true;
} }
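The depth and stencil hunks above apply the same gen4 rule; a standalone sketch of just that part of the decision (simplified, hypothetical names):

#include <stdbool.h>

/* Original gen4 (not g4x) has no intra-tile offset fields, so any
 * nonzero tile offset forces a rebase of the level. */
static bool gen4_forces_rebase(int gen, bool is_g4x,
                               unsigned tile_x, unsigned tile_y)
{
   return gen == 4 && !is_g4x && (tile_x != 0 || tile_y != 0);
}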
@ -582,7 +580,7 @@ brw_emit_depthbuffer(struct brw_context *brw)
separate_stencil = stencil_mt->format == MESA_FORMAT_S8; separate_stencil = stencil_mt->format == MESA_FORMAT_S8;
/* Gen7 supports only separate stencil */ /* Gen7 supports only separate stencil */
assert(separate_stencil || intel->gen < 7); assert(separate_stencil || brw->gen < 7);
} }
/* If there's a packed depth/stencil bound to stencil only, we need to /* If there's a packed depth/stencil bound to stencil only, we need to
@ -602,14 +600,14 @@ brw_emit_depthbuffer(struct brw_context *brw)
* set to the same value. Gens after 7 implicitly always set * set to the same value. Gens after 7 implicitly always set
* Separate_Stencil_Enable; software cannot disable it. * Separate_Stencil_Enable; software cannot disable it.
*/ */
if ((intel->gen < 7 && hiz) || intel->gen >= 7) { if ((brw->gen < 7 && hiz) || brw->gen >= 7) {
assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format)); assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format));
} }
/* Prior to Gen7, if using separate stencil, hiz must be enabled. */ /* Prior to Gen7, if using separate stencil, hiz must be enabled. */
assert(intel->gen >= 7 || !separate_stencil || hiz); assert(brw->gen >= 7 || !separate_stencil || hiz);
assert(intel->gen < 6 || depth_mt->region->tiling == I915_TILING_Y); assert(brw->gen < 6 || depth_mt->region->tiling == I915_TILING_Y);
assert(!hiz || depth_mt->region->tiling == I915_TILING_Y); assert(!hiz || depth_mt->region->tiling == I915_TILING_Y);
depthbuffer_format = brw_depthbuffer_format(brw); depthbuffer_format = brw_depthbuffer_format(brw);
@ -652,8 +650,6 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw,
uint32_t width, uint32_t height, uint32_t width, uint32_t height,
uint32_t tile_x, uint32_t tile_y) uint32_t tile_x, uint32_t tile_y)
{ {
struct intel_context *intel = &brw->intel;
/* Enable the hiz bit if we're doing separate stencil, because it and the /* Enable the hiz bit if we're doing separate stencil, because it and the
* separate stencil bit must have the same value. From Section 2.11.5.6.1.1 * separate stencil bit must have the same value. From Section 2.11.5.6.1.1
* 3DSTATE_DEPTH_BUFFER, Bit 1.21 "Separate Stencil Enable": * 3DSTATE_DEPTH_BUFFER, Bit 1.21 "Separate Stencil Enable":
@ -669,15 +665,15 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw,
/* 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER are both /* 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER are both
* non-pipelined state that will need the PIPE_CONTROL workaround. * non-pipelined state that will need the PIPE_CONTROL workaround.
*/ */
if (intel->gen == 6) { if (brw->gen == 6) {
intel_emit_post_sync_nonzero_flush(brw); intel_emit_post_sync_nonzero_flush(brw);
intel_emit_depth_stall_flushes(brw); intel_emit_depth_stall_flushes(brw);
} }
unsigned int len; unsigned int len;
if (intel->gen >= 6) if (brw->gen >= 6)
len = 7; len = 7;
else if (brw->is_g4x || intel->gen == 5) else if (brw->is_g4x || brw->gen == 5)
len = 6; len = 6;
else else
len = 5; len = 5;
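The if/else ladder above is just a per-generation DWord count for the depth buffer packet; a minimal sketch covering only the cases visible in this hunk:

#include <stdbool.h>

/* DWord count for the depth buffer packet, per generation. */
static unsigned depth_packet_len(int gen, bool is_g4x)
{
   if (gen >= 6)
      return 7;                 /* gen6+ appends one extra DWord */
   else if (is_g4x || gen == 5)
      return 6;                 /* g4x/gen5 add the tile offset DWord */
   else
      return 5;
}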
@ -705,12 +701,12 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw,
((height + tile_y - 1) << 19)); ((height + tile_y - 1) << 19));
OUT_BATCH(0); OUT_BATCH(0);
if (brw->is_g4x || intel->gen >= 5) if (brw->is_g4x || brw->gen >= 5)
OUT_BATCH(tile_x | (tile_y << 16)); OUT_BATCH(tile_x | (tile_y << 16));
else else
assert(tile_x == 0 && tile_y == 0); assert(tile_x == 0 && tile_y == 0);
if (intel->gen >= 6) if (brw->gen >= 6)
OUT_BATCH(0); OUT_BATCH(0);
ADVANCE_BATCH(); ADVANCE_BATCH();
@ -775,8 +771,8 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw,
* 3DSTATE_CLEAR_PARAMS packet must follow the DEPTH_BUFFER_STATE packet * 3DSTATE_CLEAR_PARAMS packet must follow the DEPTH_BUFFER_STATE packet
* when HiZ is enabled and the DEPTH_BUFFER_STATE changes. * when HiZ is enabled and the DEPTH_BUFFER_STATE changes.
*/ */
if (intel->gen >= 6 || hiz) { if (brw->gen >= 6 || hiz) {
if (intel->gen == 6) if (brw->gen == 6)
intel_emit_post_sync_nonzero_flush(brw); intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(2); BEGIN_BATCH(2);
@ -805,7 +801,6 @@ const struct brw_tracked_state brw_depthbuffer = {
static void upload_polygon_stipple(struct brw_context *brw) static void upload_polygon_stipple(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &brw->intel.ctx; struct gl_context *ctx = &brw->intel.ctx;
GLuint i; GLuint i;
@ -813,7 +808,7 @@ static void upload_polygon_stipple(struct brw_context *brw)
if (!ctx->Polygon.StippleFlag) if (!ctx->Polygon.StippleFlag)
return; return;
if (intel->gen == 6) if (brw->gen == 6)
intel_emit_post_sync_nonzero_flush(brw); intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(33); BEGIN_BATCH(33);
@ -854,14 +849,13 @@ const struct brw_tracked_state brw_polygon_stipple = {
static void upload_polygon_stipple_offset(struct brw_context *brw) static void upload_polygon_stipple_offset(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &brw->intel.ctx; struct gl_context *ctx = &brw->intel.ctx;
/* _NEW_POLYGON */ /* _NEW_POLYGON */
if (!ctx->Polygon.StippleFlag) if (!ctx->Polygon.StippleFlag)
return; return;
if (intel->gen == 6) if (brw->gen == 6)
intel_emit_post_sync_nonzero_flush(brw); intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(2); BEGIN_BATCH(2);
@ -897,13 +891,12 @@ const struct brw_tracked_state brw_polygon_stipple_offset = {
*/ */
static void upload_aa_line_parameters(struct brw_context *brw) static void upload_aa_line_parameters(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &brw->intel.ctx; struct gl_context *ctx = &brw->intel.ctx;
if (!ctx->Line.SmoothFlag || !brw->has_aa_line_parameters) if (!ctx->Line.SmoothFlag || !brw->has_aa_line_parameters)
return; return;
if (intel->gen == 6) if (brw->gen == 6)
intel_emit_post_sync_nonzero_flush(brw); intel_emit_post_sync_nonzero_flush(brw);
OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2)); OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
@ -928,7 +921,6 @@ const struct brw_tracked_state brw_aa_line_parameters = {
static void upload_line_stipple(struct brw_context *brw) static void upload_line_stipple(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &brw->intel.ctx; struct gl_context *ctx = &brw->intel.ctx;
GLfloat tmp; GLfloat tmp;
GLint tmpi; GLint tmpi;
@ -936,14 +928,14 @@ static void upload_line_stipple(struct brw_context *brw)
if (!ctx->Line.StippleFlag) if (!ctx->Line.StippleFlag)
return; return;
if (intel->gen == 6) if (brw->gen == 6)
intel_emit_post_sync_nonzero_flush(brw); intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(3); BEGIN_BATCH(3);
OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2)); OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2));
OUT_BATCH(ctx->Line.StipplePattern); OUT_BATCH(ctx->Line.StipplePattern);
if (intel->gen >= 7) { if (brw->gen >= 7) {
/* in U1.16 */ /* in U1.16 */
tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor; tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
tmpi = tmp * (1<<16); tmpi = tmp * (1<<16);
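The U1.16 conversion above is fixed-point scaling of the reciprocal stipple factor; a worked standalone example (the factor value is made up):

#include <stdio.h>

/* Encode 1/factor in U1.16 fixed point (16 fractional bits). */
static unsigned stipple_factor_u1_16(float factor)
{
   float inverse = 1.0f / factor;
   return (unsigned)(inverse * (1 << 16));
}

int main(void)
{
   /* factor 3 -> 1/3 ~= 0.33333 -> 21845 (0x5555) */
   printf("0x%x\n", stipple_factor_u1_16(3.0f));
   return 0;
}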
@ -976,10 +968,8 @@ const struct brw_tracked_state brw_line_stipple = {
void void
brw_upload_invariant_state(struct brw_context *brw) brw_upload_invariant_state(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
/* 3DSTATE_SIP, 3DSTATE_MULTISAMPLE, etc. are nonpipelined. */ /* 3DSTATE_SIP, 3DSTATE_MULTISAMPLE, etc. are nonpipelined. */
if (intel->gen == 6) if (brw->gen == 6)
intel_emit_post_sync_nonzero_flush(brw); intel_emit_post_sync_nonzero_flush(brw);
/* Select the 3D pipeline (as opposed to media) */ /* Select the 3D pipeline (as opposed to media) */
@ -987,7 +977,7 @@ brw_upload_invariant_state(struct brw_context *brw)
OUT_BATCH(brw->CMD_PIPELINE_SELECT << 16 | 0); OUT_BATCH(brw->CMD_PIPELINE_SELECT << 16 | 0);
ADVANCE_BATCH(); ADVANCE_BATCH();
if (intel->gen < 6) { if (brw->gen < 6) {
/* Disable depth offset clamping. */ /* Disable depth offset clamping. */
BEGIN_BATCH(2); BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2)); OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2));
@ -1027,8 +1017,6 @@ const struct brw_tracked_state brw_invariant_state = {
*/ */
static void upload_state_base_address( struct brw_context *brw ) static void upload_state_base_address( struct brw_context *brw )
{ {
struct intel_context *intel = &brw->intel;
/* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
* vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
* programmed prior to STATE_BASE_ADDRESS. * programmed prior to STATE_BASE_ADDRESS.
@ -1038,8 +1026,8 @@ static void upload_state_base_address( struct brw_context *brw )
* maybe this isn't required for us in particular. * maybe this isn't required for us in particular.
*/ */
if (intel->gen >= 6) { if (brw->gen >= 6) {
if (intel->gen == 6) if (brw->gen == 6)
intel_emit_post_sync_nonzero_flush(brw); intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(10); BEGIN_BATCH(10);
@ -1078,7 +1066,7 @@ static void upload_state_base_address( struct brw_context *brw )
OUT_BATCH(1); /* Indirect object upper bound */ OUT_BATCH(1); /* Indirect object upper bound */
OUT_BATCH(1); /* Instruction access upper bound */ OUT_BATCH(1); /* Instruction access upper bound */
ADVANCE_BATCH(); ADVANCE_BATCH();
} else if (intel->gen == 5) { } else if (brw->gen == 5) {
BEGIN_BATCH(8); BEGIN_BATCH(8);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2)); OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
OUT_BATCH(1); /* General state base address */ OUT_BATCH(1); /* General state base address */

View File

@ -80,10 +80,9 @@ can_cut_index_handle_prims(struct gl_context *ctx,
const struct _mesa_index_buffer *ib) const struct _mesa_index_buffer *ib)
{ {
struct brw_context *brw = brw_context(ctx); struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
/* Otherwise Haswell can do it all. */ /* Otherwise Haswell can do it all. */
if (intel->gen >= 8 || brw->is_haswell) if (brw->gen >= 8 || brw->is_haswell)
return true; return true;
if (!can_cut_index_handle_restart_index(ctx, ib)) { if (!can_cut_index_handle_restart_index(ctx, ib)) {

View File

@ -49,10 +49,9 @@
static void static void
write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx) write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
{ {
struct intel_context *intel = &brw->intel; if (brw->gen >= 6) {
if (intel->gen >= 6) {
/* Emit workaround flushes: */ /* Emit workaround flushes: */
if (intel->gen == 6) { if (brw->gen == 6) {
/* The timestamp write below is a non-zero post-sync op, which on /* The timestamp write below is a non-zero post-sync op, which on
* Gen6 necessitates a CS stall. CS stalls need stall at scoreboard * Gen6 necessitates a CS stall. CS stalls need stall at scoreboard
* set. See the comments for intel_emit_post_sync_nonzero_flush(). * set. See the comments for intel_emit_post_sync_nonzero_flush().
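The rule in that comment is: on gen6, any PIPE_CONTROL with a non-zero post-sync op must be preceded by the workaround flush. A schematic sketch, with callbacks standing in for real batch emission:

/* Gen6 ordering rule: workaround flush first, then the write. */
static void emit_nonzero_post_sync_op(int gen,
                                      void (*workaround_flush)(void),
                                      void (*post_sync_write)(void))
{
   if (gen == 6)
      workaround_flush();   /* CS stall + stall-at-scoreboard */
   post_sync_write();       /* e.g. the timestamp write */
}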
@ -95,8 +94,7 @@ write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
static void static void
write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx) write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
{ {
struct intel_context *intel = &brw->intel; assert(brw->gen < 6);
assert(intel->gen < 6);
BEGIN_BATCH(4); BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) | OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) |
@ -123,12 +121,11 @@ brw_queryobj_get_results(struct gl_context *ctx,
struct brw_query_object *query) struct brw_query_object *query)
{ {
struct brw_context *brw = brw_context(ctx); struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
int i; int i;
uint64_t *results; uint64_t *results;
assert(intel->gen < 6); assert(brw->gen < 6);
if (query->bo == NULL) if (query->bo == NULL)
return; return;
@ -245,10 +242,9 @@ static void
brw_begin_query(struct gl_context *ctx, struct gl_query_object *q) brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
{ {
struct brw_context *brw = brw_context(ctx); struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q; struct brw_query_object *query = (struct brw_query_object *)q;
assert(intel->gen < 6); assert(brw->gen < 6);
switch (query->Base.Target) { switch (query->Base.Target) {
case GL_TIME_ELAPSED_EXT: case GL_TIME_ELAPSED_EXT:
@ -318,10 +314,9 @@ static void
brw_end_query(struct gl_context *ctx, struct gl_query_object *q) brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
{ {
struct brw_context *brw = brw_context(ctx); struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q; struct brw_query_object *query = (struct brw_query_object *)q;
assert(intel->gen < 6); assert(brw->gen < 6);
switch (query->Base.Target) { switch (query->Base.Target) {
case GL_TIME_ELAPSED_EXT: case GL_TIME_ELAPSED_EXT:
@ -375,7 +370,7 @@ static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q)
{ {
struct brw_query_object *query = (struct brw_query_object *)q; struct brw_query_object *query = (struct brw_query_object *)q;
assert(intel_context(ctx)->gen < 6); assert(brw_context(ctx)->gen < 6);
brw_queryobj_get_results(ctx, query); brw_queryobj_get_results(ctx, query);
query->Base.Ready = true; query->Base.Ready = true;
@ -390,10 +385,9 @@ static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q)
static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q) static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
{ {
struct brw_context *brw = brw_context(ctx); struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q; struct brw_query_object *query = (struct brw_query_object *)q;
assert(intel->gen < 6); assert(brw->gen < 6);
/* From the GL_ARB_occlusion_query spec: /* From the GL_ARB_occlusion_query spec:
* *
@ -421,9 +415,8 @@ static void
ensure_bo_has_space(struct gl_context *ctx, struct brw_query_object *query) ensure_bo_has_space(struct gl_context *ctx, struct brw_query_object *query)
{ {
struct brw_context *brw = brw_context(ctx); struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
assert(intel->gen < 6); assert(brw->gen < 6);
if (!query->bo || query->last_index * 2 + 1 >= 4096 / sizeof(uint64_t)) { if (!query->bo || query->last_index * 2 + 1 >= 4096 / sizeof(uint64_t)) {
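The bound in this condition is plain arithmetic: a 4096-byte BO holds 512 uint64_t counters, and each query consumes a begin/end pair at indices 2*i and 2*i+1, so 256 queries fit. A worked sketch:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define QUERY_BO_BYTES 4096   /* BO size assumed by the check above */

/* 4096 / 8 = 512 counters; pairs (2i, 2i+1) => 256 queries per BO. */
static bool query_pair_fits(unsigned last_index)
{
   return last_index * 2 + 1 < QUERY_BO_BYTES / sizeof(uint64_t);
}

int main(void)
{
   assert(query_pair_fits(255));    /* slots 510 and 511 still fit */
   assert(!query_pair_fits(256));   /* slot 513 would overflow */
   return 0;
}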
@ -463,8 +456,7 @@ ensure_bo_has_space(struct gl_context *ctx, struct brw_query_object *query)
void void
brw_emit_query_begin(struct brw_context *brw) brw_emit_query_begin(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx;
struct gl_context *ctx = &intel->ctx;
struct brw_query_object *query = brw->query.obj; struct brw_query_object *query = brw->query.obj;
if (brw->hw_ctx) if (brw->hw_ctx)

View File

@ -61,8 +61,6 @@ class schedule_node : public exec_node
public: public:
schedule_node(backend_instruction *inst, const struct brw_context *brw) schedule_node(backend_instruction *inst, const struct brw_context *brw)
{ {
const struct intel_context *intel = &brw->intel;
this->inst = inst; this->inst = inst;
this->child_array_size = 0; this->child_array_size = 0;
this->children = NULL; this->children = NULL;
@ -74,7 +72,7 @@ public:
/* We can't measure Gen6 timings directly but expect them to be much /* We can't measure Gen6 timings directly but expect them to be much
* closer to Gen7 than Gen4. * closer to Gen7 than Gen4.
*/ */
if (intel->gen >= 6) if (brw->gen >= 6)
set_latency_gen7(brw->is_haswell); set_latency_gen7(brw->is_haswell);
else else
set_latency_gen4(); set_latency_gen4();

View File

@ -50,7 +50,6 @@
static void compile_sf_prog( struct brw_context *brw, static void compile_sf_prog( struct brw_context *brw,
struct brw_sf_prog_key *key ) struct brw_sf_prog_key *key )
{ {
struct intel_context *intel = &brw->intel;
struct brw_sf_compile c; struct brw_sf_compile c;
const GLuint *program; const GLuint *program;
void *mem_ctx; void *mem_ctx;
@ -118,7 +117,7 @@ static void compile_sf_prog( struct brw_context *brw,
printf("sf:\n"); printf("sf:\n");
for (i = 0; i < program_size / sizeof(struct brw_instruction); i++) for (i = 0; i < program_size / sizeof(struct brw_instruction); i++)
brw_disasm(stdout, &((struct brw_instruction *)program)[i], brw_disasm(stdout, &((struct brw_instruction *)program)[i],
intel->gen); brw->gen);
printf("\n"); printf("\n");
} }

View File

@ -165,7 +165,7 @@ static void copy_colors( struct brw_sf_compile *c,
static void do_flatshade_triangle( struct brw_sf_compile *c ) static void do_flatshade_triangle( struct brw_sf_compile *c )
{ {
struct brw_compile *p = &c->func; struct brw_compile *p = &c->func;
struct intel_context *intel = &p->brw->intel; struct brw_context *brw = p->brw;
struct brw_reg ip = brw_ip_reg(); struct brw_reg ip = brw_ip_reg();
GLuint nr = _mesa_bitcount_64(c->key.attrs & VARYING_SLOT_COLOR_BITS); GLuint nr = _mesa_bitcount_64(c->key.attrs & VARYING_SLOT_COLOR_BITS);
GLuint jmpi = 1; GLuint jmpi = 1;
@ -178,7 +178,7 @@ static void do_flatshade_triangle( struct brw_sf_compile *c )
if (c->key.primitive == SF_UNFILLED_TRIS) if (c->key.primitive == SF_UNFILLED_TRIS)
return; return;
if (intel->gen == 5) if (brw->gen == 5)
jmpi = 2; jmpi = 2;
brw_push_insn_state(p); brw_push_insn_state(p);
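The jmpi doubling above reflects that Ironlake counts JMPI distances in 64-bit units, half of a 128-bit instruction, so each skipped instruction costs two units; a one-line sketch:

/* Jump-distance units per skipped instruction. */
static int jmpi_scale(int gen)
{
   return gen == 5 ? 2 : 1;   /* gen5: 64-bit units, 128-bit insns */
}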
@ -204,7 +204,7 @@ static void do_flatshade_triangle( struct brw_sf_compile *c )
static void do_flatshade_line( struct brw_sf_compile *c ) static void do_flatshade_line( struct brw_sf_compile *c )
{ {
struct brw_compile *p = &c->func; struct brw_compile *p = &c->func;
struct intel_context *intel = &p->brw->intel; struct brw_context *brw = p->brw;
struct brw_reg ip = brw_ip_reg(); struct brw_reg ip = brw_ip_reg();
GLuint nr = _mesa_bitcount_64(c->key.attrs & VARYING_SLOT_COLOR_BITS); GLuint nr = _mesa_bitcount_64(c->key.attrs & VARYING_SLOT_COLOR_BITS);
GLuint jmpi = 1; GLuint jmpi = 1;
@ -217,7 +217,7 @@ static void do_flatshade_line( struct brw_sf_compile *c )
if (c->key.primitive == SF_UNFILLED_TRIS) if (c->key.primitive == SF_UNFILLED_TRIS)
return; return;
if (intel->gen == 5) if (brw->gen == 5)
jmpi = 2; jmpi = 2;
brw_push_insn_state(p); brw_push_insn_state(p);

View File

@ -162,7 +162,7 @@ static void upload_sf_unit( struct brw_context *brw )
/* Each SF thread produces 1 PUE, and there can be up to 24 (Pre-Ironlake) or /* Each SF thread produces 1 PUE, and there can be up to 24 (Pre-Ironlake) or
* 48 (Ironlake) threads. * 48 (Ironlake) threads.
*/ */
if (intel->gen == 5) if (brw->gen == 5)
chipset_max_threads = 48; chipset_max_threads = 48;
else else
chipset_max_threads = 24; chipset_max_threads = 24;

View File

@ -89,7 +89,7 @@ brw_lower_packing_builtins(struct brw_context *brw,
| LOWER_PACK_UNORM_4x8 | LOWER_PACK_UNORM_4x8
| LOWER_UNPACK_UNORM_4x8; | LOWER_UNPACK_UNORM_4x8;
if (brw->intel.gen >= 7) { if (brw->gen >= 7) {
/* Gen7 introduced the f32to16 and f16to32 instructions, which can be /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
* used to execute packHalf2x16 and unpackHalf2x16. For AOS code, no * used to execute packHalf2x16 and unpackHalf2x16. For AOS code, no
* lowering is needed. For SOA code, the Half2x16 ops must be * lowering is needed. For SOA code, the Half2x16 ops must be
@ -111,7 +111,6 @@ GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg) brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{ {
struct brw_context *brw = brw_context(ctx); struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = &brw->intel;
unsigned int stage; unsigned int stage;
for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) { for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
@ -146,10 +145,10 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
*/ */
brw_lower_packing_builtins(brw, (gl_shader_type) stage, shader->ir); brw_lower_packing_builtins(brw, (gl_shader_type) stage, shader->ir);
do_mat_op_to_vec(shader->ir); do_mat_op_to_vec(shader->ir);
const int bitfield_insert = intel->gen >= 7 const int bitfield_insert = brw->gen >= 7
? BITFIELD_INSERT_TO_BFM_BFI ? BITFIELD_INSERT_TO_BFM_BFI
: 0; : 0;
const int lrp_to_arith = intel->gen < 6 ? LRP_TO_ARITH : 0; const int lrp_to_arith = brw->gen < 6 ? LRP_TO_ARITH : 0;
lower_instructions(shader->ir, lower_instructions(shader->ir,
MOD_TO_FRACT | MOD_TO_FRACT |
DIV_TO_MUL_RCP | DIV_TO_MUL_RCP |
@ -162,7 +161,7 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
/* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this, /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
* if-statements need to be flattened. * if-statements need to be flattened.
*/ */
if (intel->gen < 6) if (brw->gen < 6)
lower_if_to_cond_assign(shader->ir, 16); lower_if_to_cond_assign(shader->ir, 16);
do_lower_texture_projection(shader->ir); do_lower_texture_projection(shader->ir);
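The 16-deep nesting limit is why lower_if_to_cond_assign flattens deeper if-statements into predicated assignments; a toy scalar illustration of the transform (not the actual IR pass):

/* if (cond) x = a; else x = b;  becomes two select-style writes. */
static float flatten_if(int cond, float a, float b)
{
   float x = b;            /* "else" value written first */
   x = cond ? a : x;       /* "then" value merged in under cond */
   return x;
}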

View File

@ -219,9 +219,8 @@ static void
dump_sdc(struct brw_context *brw, uint32_t offset) dump_sdc(struct brw_context *brw, uint32_t offset)
{ {
const char *name = "SDC"; const char *name = "SDC";
struct intel_context *intel = &brw->intel;
if (intel->gen >= 5 && intel->gen <= 6) { if (brw->gen >= 5 && brw->gen <= 6) {
struct gen5_sampler_default_color *sdc = (brw->batch.bo->virtual + struct gen5_sampler_default_color *sdc = (brw->batch.bo->virtual +
offset); offset);
batch_out(brw, name, offset, 0, "unorm rgba\n"); batch_out(brw, name, offset, 0, "unorm rgba\n");
@ -249,11 +248,10 @@ dump_sdc(struct brw_context *brw, uint32_t offset)
static void dump_sampler_state(struct brw_context *brw, static void dump_sampler_state(struct brw_context *brw,
uint32_t offset, uint32_t size) uint32_t offset, uint32_t size)
{ {
struct intel_context *intel = &brw->intel;
int i; int i;
struct brw_sampler_state *samp = brw->batch.bo->virtual + offset; struct brw_sampler_state *samp = brw->batch.bo->virtual + offset;
assert(intel->gen < 7); assert(brw->gen < 7);
for (i = 0; i < size / sizeof(*samp); i++) { for (i = 0; i < size / sizeof(*samp); i++) {
char name[20]; char name[20];
@ -272,11 +270,10 @@ static void dump_sampler_state(struct brw_context *brw,
static void dump_gen7_sampler_state(struct brw_context *brw, static void dump_gen7_sampler_state(struct brw_context *brw,
uint32_t offset, uint32_t size) uint32_t offset, uint32_t size)
{ {
struct intel_context *intel = &brw->intel;
struct gen7_sampler_state *samp = brw->batch.bo->virtual + offset; struct gen7_sampler_state *samp = brw->batch.bo->virtual + offset;
int i; int i;
assert(intel->gen >= 7); assert(brw->gen >= 7);
for (i = 0; i < size / sizeof(*samp); i++) { for (i = 0; i < size / sizeof(*samp); i++) {
char name[20]; char name[20];
@ -296,11 +293,10 @@ static void dump_gen7_sampler_state(struct brw_context *brw,
static void dump_sf_viewport_state(struct brw_context *brw, static void dump_sf_viewport_state(struct brw_context *brw,
uint32_t offset) uint32_t offset)
{ {
struct intel_context *intel = &brw->intel;
const char *name = "SF VP"; const char *name = "SF VP";
struct brw_sf_viewport *vp = brw->batch.bo->virtual + offset; struct brw_sf_viewport *vp = brw->batch.bo->virtual + offset;
assert(intel->gen < 7); assert(brw->gen < 7);
batch_out(brw, name, offset, 0, "m00 = %f\n", vp->viewport.m00); batch_out(brw, name, offset, 0, "m00 = %f\n", vp->viewport.m00);
batch_out(brw, name, offset, 1, "m11 = %f\n", vp->viewport.m11); batch_out(brw, name, offset, 1, "m11 = %f\n", vp->viewport.m11);
@ -318,11 +314,10 @@ static void dump_sf_viewport_state(struct brw_context *brw,
static void dump_clip_viewport_state(struct brw_context *brw, static void dump_clip_viewport_state(struct brw_context *brw,
uint32_t offset) uint32_t offset)
{ {
struct intel_context *intel = &brw->intel;
const char *name = "CLIP VP"; const char *name = "CLIP VP";
struct brw_clipper_viewport *vp = brw->batch.bo->virtual + offset; struct brw_clipper_viewport *vp = brw->batch.bo->virtual + offset;
assert(intel->gen < 7); assert(brw->gen < 7);
batch_out(brw, name, offset, 0, "xmin = %f\n", vp->xmin); batch_out(brw, name, offset, 0, "xmin = %f\n", vp->xmin);
batch_out(brw, name, offset, 1, "xmax = %f\n", vp->xmax); batch_out(brw, name, offset, 1, "xmax = %f\n", vp->xmax);
@ -333,11 +328,10 @@ static void dump_clip_viewport_state(struct brw_context *brw,
static void dump_sf_clip_viewport_state(struct brw_context *brw, static void dump_sf_clip_viewport_state(struct brw_context *brw,
uint32_t offset) uint32_t offset)
{ {
struct intel_context *intel = &brw->intel;
const char *name = "SF_CLIP VP"; const char *name = "SF_CLIP VP";
struct gen7_sf_clip_viewport *vp = brw->batch.bo->virtual + offset; struct gen7_sf_clip_viewport *vp = brw->batch.bo->virtual + offset;
assert(intel->gen >= 7); assert(brw->gen >= 7);
batch_out(brw, name, offset, 0, "m00 = %f\n", vp->viewport.m00); batch_out(brw, name, offset, 0, "m00 = %f\n", vp->viewport.m00);
batch_out(brw, name, offset, 1, "m11 = %f\n", vp->viewport.m11); batch_out(brw, name, offset, 1, "m11 = %f\n", vp->viewport.m11);
@ -485,7 +479,6 @@ static void dump_binding_table(struct brw_context *brw, uint32_t offset,
static void static void
dump_prog_cache(struct brw_context *brw) dump_prog_cache(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
struct brw_cache *cache = &brw->cache; struct brw_cache *cache = &brw->cache;
unsigned int b, i; unsigned int b, i;
uint32_t *data; uint32_t *data;
@ -528,7 +521,7 @@ dump_prog_cache(struct brw_context *brw)
name, name,
data[i * 4], data[i * 4 + 1], data[i * 4 + 2], data[i * 4 + 3]); data[i * 4], data[i * 4 + 1], data[i * 4 + 2], data[i * 4 + 3]);
brw_disasm(stderr, (void *)(data + i * 4), intel->gen); brw_disasm(stderr, (void *)(data + i * 4), brw->gen);
} }
} }
} }
@ -539,7 +532,6 @@ dump_prog_cache(struct brw_context *brw)
static void static void
dump_state_batch(struct brw_context *brw) dump_state_batch(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
int i; int i;
for (i = 0; i < brw->state_batch_count; i++) { for (i = 0; i < brw->state_batch_count; i++) {
@ -566,7 +558,7 @@ dump_state_batch(struct brw_context *brw)
dump_clip_viewport_state(brw, offset); dump_clip_viewport_state(brw, offset);
break; break;
case AUB_TRACE_SF_VP_STATE: case AUB_TRACE_SF_VP_STATE:
if (intel->gen >= 7) { if (brw->gen >= 7) {
dump_sf_clip_viewport_state(brw, offset); dump_sf_clip_viewport_state(brw, offset);
} else { } else {
dump_sf_viewport_state(brw, offset); dump_sf_viewport_state(brw, offset);
@ -579,7 +571,7 @@ dump_state_batch(struct brw_context *brw)
dump_depth_stencil_state(brw, offset); dump_depth_stencil_state(brw, offset);
break; break;
case AUB_TRACE_CC_STATE: case AUB_TRACE_CC_STATE:
if (intel->gen >= 6) if (brw->gen >= 6)
dump_cc_state_gen6(brw, offset); dump_cc_state_gen6(brw, offset);
else else
dump_cc_state_gen4(brw, offset); dump_cc_state_gen4(brw, offset);
@ -591,14 +583,14 @@ dump_state_batch(struct brw_context *brw)
dump_binding_table(brw, offset, size); dump_binding_table(brw, offset, size);
break; break;
case AUB_TRACE_SURFACE_STATE: case AUB_TRACE_SURFACE_STATE:
if (intel->gen < 7) { if (brw->gen < 7) {
dump_surface_state(brw, offset); dump_surface_state(brw, offset);
} else { } else {
dump_gen7_surface_state(brw, offset); dump_gen7_surface_state(brw, offset);
} }
break; break;
case AUB_TRACE_SAMPLER_STATE: case AUB_TRACE_SAMPLER_STATE:
if (intel->gen < 7) { if (brw->gen < 7) {
dump_sampler_state(brw, offset, size); dump_sampler_state(brw, offset, size);
} else { } else {
dump_gen7_sampler_state(brw, offset, size); dump_gen7_sampler_state(brw, offset, size);

View File

@ -237,8 +237,6 @@ static const struct brw_tracked_state *gen7_atoms[] =
static void static void
brw_upload_initial_gpu_state(struct brw_context *brw) brw_upload_initial_gpu_state(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
/* On platforms with hardware contexts, we can set our initial GPU state /* On platforms with hardware contexts, we can set our initial GPU state
* right away rather than doing it via state atoms. This saves a small * right away rather than doing it via state atoms. This saves a small
* amount of overhead on every draw call. * amount of overhead on every draw call.
@ -248,7 +246,7 @@ brw_upload_initial_gpu_state(struct brw_context *brw)
brw_upload_invariant_state(brw); brw_upload_invariant_state(brw);
if (intel->gen >= 7) { if (brw->gen >= 7) {
gen7_allocate_push_constants(brw); gen7_allocate_push_constants(brw);
} }
} }
@ -260,10 +258,10 @@ void brw_init_state( struct brw_context *brw )
brw_init_caches(brw); brw_init_caches(brw);
if (brw->intel.gen >= 7) { if (brw->gen >= 7) {
atoms = gen7_atoms; atoms = gen7_atoms;
num_atoms = ARRAY_SIZE(gen7_atoms); num_atoms = ARRAY_SIZE(gen7_atoms);
} else if (brw->intel.gen == 6) { } else if (brw->gen == 6) {
atoms = gen6_atoms; atoms = gen6_atoms;
num_atoms = ARRAY_SIZE(gen6_atoms); num_atoms = ARRAY_SIZE(gen6_atoms);
} else { } else {

View File

@ -527,12 +527,11 @@ brw_format_for_mesa_format(gl_format mesa_format)
void void
brw_init_surface_formats(struct brw_context *brw) brw_init_surface_formats(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx;
struct gl_context *ctx = &intel->ctx;
int gen; int gen;
gl_format format; gl_format format;
gen = intel->gen * 10; gen = brw->gen * 10;
if (brw->is_g4x) if (brw->is_g4x)
gen += 5; gen += 5;
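The decimal encoding above lets half-generations like G4X sort numerically between whole generations; the same arithmetic standalone:

#include <stdbool.h>

/* Gen4 -> 40, G4X -> 45, Gen5 -> 50, Gen7 -> 70, and so on. */
static int gen_times_ten(int gen, bool is_g4x)
{
   int g = gen * 10;
   if (is_g4x)
      g += 5;
   return g;
}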
@ -652,7 +651,6 @@ bool
brw_render_target_supported(struct brw_context *brw, brw_render_target_supported(struct brw_context *brw,
struct gl_renderbuffer *rb) struct gl_renderbuffer *rb)
{ {
struct intel_context *intel = &brw->intel;
gl_format format = rb->Format; gl_format format = rb->Format;
/* Many integer formats are promoted to RGBA (like XRGB8888 is), which means /* Many integer formats are promoted to RGBA (like XRGB8888 is), which means
@ -671,7 +669,7 @@ brw_render_target_supported(struct brw_context *brw,
*/ */
if (rb->NumSamples > 0 && _mesa_get_format_bytes(format) > 8) { if (rb->NumSamples > 0 && _mesa_get_format_bytes(format) > 8) {
/* Gen6: MSAA on >64 bit formats is unsupported. */ /* Gen6: MSAA on >64 bit formats is unsupported. */
if (intel->gen <= 6) if (brw->gen <= 6)
return false; return false;
/* Gen7: 8x MSAA on >64 bit formats is unsupported. */ /* Gen7: 8x MSAA on >64 bit formats is unsupported. */
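The two comments describe a bytes-per-pixel cutoff; a hedged sketch of the rule as far as this hunk shows it (the gen7 8x branch is implied by the trailing comment):

#include <stdbool.h>

static bool msaa_ok_for_format_size(int gen, unsigned num_samples,
                                    unsigned bytes_per_pixel)
{
   if (num_samples > 0 && bytes_per_pixel > 8) {
      if (gen <= 6)
         return false;          /* no MSAA on >64bpp through gen6 */
      if (num_samples == 8)
         return false;          /* gen7: no 8x MSAA on >64bpp */
   }
   return true;
}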
@ -688,7 +686,6 @@ translate_tex_format(struct brw_context *brw,
GLenum depth_mode, GLenum depth_mode,
GLenum srgb_decode) GLenum srgb_decode)
{ {
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &brw->intel.ctx; struct gl_context *ctx = &brw->intel.ctx;
if (srgb_decode == GL_SKIP_DECODE_EXT) if (srgb_decode == GL_SKIP_DECODE_EXT)
mesa_format = _mesa_get_srgb_format_linear(mesa_format); mesa_format = _mesa_get_srgb_format_linear(mesa_format);
@ -715,7 +712,7 @@ translate_tex_format(struct brw_context *brw,
return BRW_SURFACEFORMAT_R32G32B32A32_FLOAT; return BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
case MESA_FORMAT_SRGB_DXT1: case MESA_FORMAT_SRGB_DXT1:
if (intel->gen == 4 && !brw->is_g4x) { if (brw->gen == 4 && !brw->is_g4x) {
/* Work around missing SRGB DXT1 support on original gen4 by just /* Work around missing SRGB DXT1 support on original gen4 by just
* skipping SRGB decode. It's not worth not supporting sRGB in * skipping SRGB decode. It's not worth not supporting sRGB in
* general to prevent this. * general to prevent this.

View File

@ -42,7 +42,6 @@ static unsigned int
intel_horizontal_texture_alignment_unit(struct brw_context *brw, intel_horizontal_texture_alignment_unit(struct brw_context *brw,
gl_format format) gl_format format)
{ {
struct intel_context *intel = &brw->intel;
/** /**
* From the "Alignment Unit Size" section of various specs, namely: * From the "Alignment Unit Size" section of various specs, namely:
* - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4 * - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
@ -86,7 +85,7 @@ intel_horizontal_texture_alignment_unit(struct brw_context *brw,
* offset workaround blits we do, align the X to 8, which depth texturing * offset workaround blits we do, align the X to 8, which depth texturing
* can handle (sadly, it can't handle 8 in the Y direction). * can handle (sadly, it can't handle 8 in the Y direction).
*/ */
if (intel->gen >= 7 && if (brw->gen >= 7 &&
_mesa_get_format_base_format(format) == GL_DEPTH_COMPONENT) _mesa_get_format_base_format(format) == GL_DEPTH_COMPONENT)
return 8; return 8;
@ -97,7 +96,6 @@ static unsigned int
intel_vertical_texture_alignment_unit(struct brw_context *brw, intel_vertical_texture_alignment_unit(struct brw_context *brw,
gl_format format) gl_format format)
{ {
struct intel_context *intel = &brw->intel;
/** /**
* From the "Alignment Unit Size" section of various specs, namely: * From the "Alignment Unit Size" section of various specs, namely:
* - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4 * - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
@ -127,11 +125,11 @@ intel_vertical_texture_alignment_unit(struct brw_context *brw,
return 4; return 4;
if (format == MESA_FORMAT_S8) if (format == MESA_FORMAT_S8)
return intel->gen >= 7 ? 8 : 4; return brw->gen >= 7 ? 8 : 4;
GLenum base_format = _mesa_get_format_base_format(format); GLenum base_format = _mesa_get_format_base_format(format);
if (intel->gen >= 6 && if (brw->gen >= 6 &&
(base_format == GL_DEPTH_COMPONENT || (base_format == GL_DEPTH_COMPONENT ||
base_format == GL_DEPTH_STENCIL)) { base_format == GL_DEPTH_STENCIL)) {
return 4; return 4;
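For the formats visible in this hunk the vertical alignment collapses to a small table; a sketch of just these cases (the fallback of 2 is an assumption for illustration):

#include <stdbool.h>

static unsigned vertical_align(int gen, bool is_s8, bool is_depth_or_ds)
{
   if (is_s8)
      return gen >= 7 ? 8 : 4;   /* W-tiled stencil */
   if (gen >= 6 && is_depth_or_ds)
      return 4;
   return 2;                     /* assumed common case */
}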
@ -210,7 +208,6 @@ static void
brw_miptree_layout_texture_array(struct brw_context *brw, brw_miptree_layout_texture_array(struct brw_context *brw,
struct intel_mipmap_tree *mt) struct intel_mipmap_tree *mt)
{ {
struct intel_context *intel = &brw->intel;
unsigned qpitch = 0; unsigned qpitch = 0;
int h0, h1; int h0, h1;
@ -219,7 +216,7 @@ brw_miptree_layout_texture_array(struct brw_context *brw,
if (mt->array_spacing_lod0) if (mt->array_spacing_lod0)
qpitch = h0; qpitch = h0;
else else
qpitch = (h0 + h1 + (intel->gen >= 7 ? 12 : 11) * mt->align_h); qpitch = (h0 + h1 + (brw->gen >= 7 ? 12 : 11) * mt->align_h);
if (mt->compressed) if (mt->compressed)
qpitch /= 4; qpitch /= 4;
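The qpitch expression is easy to sanity-check with concrete numbers; a standalone sketch (the sizes are made up):

#include <stdbool.h>
#include <stdio.h>

/* Rows between array slices: aligned LOD0 + LOD1 heights plus a
 * per-gen pad of 12 (gen7+) or 11 (earlier) alignment units;
 * compressed formats count rows in 4-row blocks. */
static unsigned qpitch_rows(unsigned h0, unsigned h1, unsigned align_h,
                            int gen, bool compressed)
{
   unsigned q = h0 + h1 + (gen >= 7 ? 12 : 11) * align_h;
   return compressed ? q / 4 : q;
}

int main(void)
{
   /* h0 = 64, h1 = 32, align_h = 4: 144 rows on gen7, 140 before. */
   printf("%u %u\n", qpitch_rows(64, 32, 4, 7, false),
                     qpitch_rows(64, 32, 4, 6, false));
   return 0;
}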
@ -314,13 +311,12 @@ brw_miptree_layout_texture_3d(struct brw_context *brw,
void void
brw_miptree_layout(struct brw_context *brw, struct intel_mipmap_tree *mt) brw_miptree_layout(struct brw_context *brw, struct intel_mipmap_tree *mt)
{ {
struct intel_context *intel = &brw->intel;
mt->align_w = intel_horizontal_texture_alignment_unit(brw, mt->format); mt->align_w = intel_horizontal_texture_alignment_unit(brw, mt->format);
mt->align_h = intel_vertical_texture_alignment_unit(brw, mt->format); mt->align_h = intel_vertical_texture_alignment_unit(brw, mt->format);
switch (mt->target) { switch (mt->target) {
case GL_TEXTURE_CUBE_MAP: case GL_TEXTURE_CUBE_MAP:
if (intel->gen == 4) { if (brw->gen == 4) {
/* Gen4 stores cube maps as 3D textures. */ /* Gen4 stores cube maps as 3D textures. */
assert(mt->physical_depth0 == 6); assert(mt->physical_depth0 == 6);
brw_miptree_layout_texture_3d(brw, mt); brw_miptree_layout_texture_3d(brw, mt);

View File

@ -114,7 +114,6 @@ static bool check_urb_layout(struct brw_context *brw)
*/ */
static void recalculate_urb_fence( struct brw_context *brw ) static void recalculate_urb_fence( struct brw_context *brw )
{ {
struct intel_context *intel = &brw->intel;
GLuint csize = brw->curbe.total_size; GLuint csize = brw->curbe.total_size;
GLuint vsize = brw->vs.prog_data->base.urb_entry_size; GLuint vsize = brw->vs.prog_data->base.urb_entry_size;
GLuint sfsize = brw->sf.prog_data->urb_entry_size; GLuint sfsize = brw->sf.prog_data->urb_entry_size;
@ -148,7 +147,7 @@ static void recalculate_urb_fence( struct brw_context *brw )
brw->urb.constrained = 0; brw->urb.constrained = 0;
if (intel->gen == 5) { if (brw->gen == 5) {
brw->urb.nr_vs_entries = 128; brw->urb.nr_vs_entries = 128;
brw->urb.nr_sf_entries = 48; brw->urb.nr_sf_entries = 48;
if (check_urb_layout(brw)) { if (check_urb_layout(brw)) {

View File

@ -215,7 +215,7 @@ vec4_instruction::is_send_from_grf()
bool bool
vec4_visitor::can_do_source_mods(vec4_instruction *inst) vec4_visitor::can_do_source_mods(vec4_instruction *inst)
{ {
if (intel->gen == 6 && inst->is_math()) if (brw->gen == 6 && inst->is_math())
return false; return false;
if (inst->is_send_from_grf()) if (inst->is_send_from_grf())
@ -878,7 +878,7 @@ vec4_visitor::opt_register_coalesce()
if (scan_inst->mlen) if (scan_inst->mlen)
break; break;
if (intel->gen == 6) { if (brw->gen == 6) {
/* gen6 math instructions must have the destination be /* gen6 math instructions must have the destination be
* GRF, so no compute-to-MRF for them. * GRF, so no compute-to-MRF for them.
*/ */
@ -1248,7 +1248,7 @@ vec4_vs_visitor::setup_attributes(int payload_reg)
unsigned vue_entries = unsigned vue_entries =
MAX2(nr_attributes, prog_data->vue_map.num_slots); MAX2(nr_attributes, prog_data->vue_map.num_slots);
if (intel->gen == 6) if (brw->gen == 6)
prog_data->urb_entry_size = ALIGN(vue_entries, 8) / 8; prog_data->urb_entry_size = ALIGN(vue_entries, 8) / 8;
else else
prog_data->urb_entry_size = ALIGN(vue_entries, 4) / 4; prog_data->urb_entry_size = ALIGN(vue_entries, 4) / 4;
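The ALIGN arithmetic converts vec4 slot counts into the allocation granularity the divisors imply: 8-slot (128-byte) units on gen6, 4-slot (64-byte) units otherwise. A worked sketch with a made-up entry count:

#include <stdio.h>

#define ALIGN(value, alignment) \
   (((value) + (alignment) - 1) & ~((alignment) - 1))

/* gen6 programs URB entry size in 8-slot units, gen7 in 4-slot units. */
static unsigned urb_entry_size(unsigned vue_entries, int gen)
{
   return gen == 6 ? ALIGN(vue_entries, 8) / 8
                   : ALIGN(vue_entries, 4) / 4;
}

int main(void)
{
   /* 9 vec4 slots: 2 units of 8 on gen6, 3 units of 4 on gen7. */
   printf("%u %u\n", urb_entry_size(9, 6), urb_entry_size(9, 7));
   return 0;
}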
@ -1262,7 +1262,7 @@ vec4_visitor::setup_uniforms(int reg)
/* The pre-gen6 VS requires that some push constants get loaded no /* The pre-gen6 VS requires that some push constants get loaded no
* matter what, or the GPU would hang. * matter what, or the GPU would hang.
*/ */
if (intel->gen < 6 && this->uniforms == 0) { if (brw->gen < 6 && this->uniforms == 0) {
this->uniform_vector_size[this->uniforms] = 1; this->uniform_vector_size[this->uniforms] = 1;
for (unsigned int i = 0; i < 4; i++) { for (unsigned int i = 0; i < 4; i++) {
@ -1305,7 +1305,7 @@ vec4_visitor::setup_payload(void)
src_reg src_reg
vec4_visitor::get_timestamp() vec4_visitor::get_timestamp()
{ {
assert(intel->gen >= 7); assert(brw->gen >= 7);
src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE, src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
BRW_ARF_TIMESTAMP, BRW_ARF_TIMESTAMP,

View File

@ -268,7 +268,7 @@ vec4_generator::generate_tex(vec4_instruction *inst,
{ {
int msg_type = -1; int msg_type = -1;
if (intel->gen >= 5) { if (brw->gen >= 5) {
switch (inst->opcode) { switch (inst->opcode) {
case SHADER_OPCODE_TEX: case SHADER_OPCODE_TEX:
case SHADER_OPCODE_TXL: case SHADER_OPCODE_TXL:
@ -291,7 +291,7 @@ vec4_generator::generate_tex(vec4_instruction *inst,
msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD; msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
break; break;
case SHADER_OPCODE_TXF_MS: case SHADER_OPCODE_TXF_MS:
if (intel->gen >= 7) if (brw->gen >= 7)
msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS; msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
else else
msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD; msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
@ -410,7 +410,7 @@ vec4_generator::generate_oword_dual_block_offsets(struct brw_reg m1,
{ {
int second_vertex_offset; int second_vertex_offset;
if (intel->gen >= 6) if (brw->gen >= 6)
second_vertex_offset = 1; second_vertex_offset = 1;
else else
second_vertex_offset = 16; second_vertex_offset = 16;
@ -455,9 +455,9 @@ vec4_generator::generate_scratch_read(vec4_instruction *inst,
uint32_t msg_type; uint32_t msg_type;
if (intel->gen >= 6) if (brw->gen >= 6)
msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ; msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
else if (intel->gen == 5 || brw->is_g4x) else if (brw->gen == 5 || brw->is_g4x)
msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ; msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
else else
msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ; msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
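This three-way selection recurs below for scratch writes and pull-constant loads; the shared pattern, sketched with hypothetical enum values:

/* Hypothetical values standing in for the real message descriptors. */
enum oword_read_msg {
   MSG_BRW_OWORD_DUAL_BLOCK_READ  = 0,
   MSG_G45_OWORD_DUAL_BLOCK_READ  = 1,
   MSG_GEN6_OWORD_DUAL_BLOCK_READ = 2,
};

static enum oword_read_msg pick_oword_read_msg(int gen, int is_g4x)
{
   if (gen >= 6)
      return MSG_GEN6_OWORD_DUAL_BLOCK_READ;
   else if (gen == 5 || is_g4x)
      return MSG_G45_OWORD_DUAL_BLOCK_READ;
   else
      return MSG_BRW_OWORD_DUAL_BLOCK_READ;
}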
@ -468,7 +468,7 @@ vec4_generator::generate_scratch_read(vec4_instruction *inst,
struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND); struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
brw_set_dest(p, send, dst); brw_set_dest(p, send, dst);
brw_set_src0(p, send, header); brw_set_src0(p, send, header);
if (intel->gen < 6) if (brw->gen < 6)
send->header.destreg__conditionalmod = inst->base_mrf; send->header.destreg__conditionalmod = inst->base_mrf;
brw_set_dp_read_message(p, send, brw_set_dp_read_message(p, send,
255, /* binding table index: stateless access */ 255, /* binding table index: stateless access */
@ -505,9 +505,9 @@ vec4_generator::generate_scratch_write(vec4_instruction *inst,
uint32_t msg_type; uint32_t msg_type;
if (intel->gen >= 7) if (brw->gen >= 7)
msg_type = GEN7_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE; msg_type = GEN7_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
else if (intel->gen == 6) else if (brw->gen == 6)
msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE; msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
else else
msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE; msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
@ -519,7 +519,7 @@ vec4_generator::generate_scratch_write(vec4_instruction *inst,
* guaranteed and write commits only matter for inter-thread * guaranteed and write commits only matter for inter-thread
* synchronization. * synchronization.
*/ */
if (intel->gen >= 6) { if (brw->gen >= 6) {
write_commit = false; write_commit = false;
} else { } else {
/* The visitor set up our destination register to be g0. This /* The visitor set up our destination register to be g0. This
@ -539,7 +539,7 @@ vec4_generator::generate_scratch_write(vec4_instruction *inst,
struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND); struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
brw_set_dest(p, send, dst); brw_set_dest(p, send, dst);
brw_set_src0(p, send, header); brw_set_src0(p, send, header);
if (intel->gen < 6) if (brw->gen < 6)
send->header.destreg__conditionalmod = inst->base_mrf; send->header.destreg__conditionalmod = inst->base_mrf;
brw_set_dp_write_message(p, send, brw_set_dp_write_message(p, send,
255, /* binding table index: stateless access */ 255, /* binding table index: stateless access */
@ -559,7 +559,7 @@ vec4_generator::generate_pull_constant_load(vec4_instruction *inst,
struct brw_reg index, struct brw_reg index,
struct brw_reg offset) struct brw_reg offset)
{ {
assert(intel->gen <= 7); assert(brw->gen <= 7);
assert(index.file == BRW_IMMEDIATE_VALUE && assert(index.file == BRW_IMMEDIATE_VALUE &&
index.type == BRW_REGISTER_TYPE_UD); index.type == BRW_REGISTER_TYPE_UD);
uint32_t surf_index = index.dw1.ud; uint32_t surf_index = index.dw1.ud;
@ -573,9 +573,9 @@ vec4_generator::generate_pull_constant_load(vec4_instruction *inst,
uint32_t msg_type; uint32_t msg_type;
if (intel->gen >= 6) if (brw->gen >= 6)
msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ; msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
else if (intel->gen == 5 || brw->is_g4x) else if (brw->gen == 5 || brw->is_g4x)
msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ; msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
else else
msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ; msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
@ -586,7 +586,7 @@ vec4_generator::generate_pull_constant_load(vec4_instruction *inst,
struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND); struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
brw_set_dest(p, send, dst); brw_set_dest(p, send, dst);
brw_set_src0(p, send, header); brw_set_src0(p, send, header);
if (intel->gen < 6) if (brw->gen < 6)
send->header.destreg__conditionalmod = inst->base_mrf; send->header.destreg__conditionalmod = inst->base_mrf;
brw_set_dp_read_message(p, send, brw_set_dp_read_message(p, send,
surf_index, surf_index,
@ -757,7 +757,7 @@ vec4_generator::generate_vec4_instruction(vec4_instruction *instruction,
case BRW_OPCODE_IF: case BRW_OPCODE_IF:
if (inst->src[0].file != BAD_FILE) { if (inst->src[0].file != BAD_FILE) {
/* The instruction has an embedded compare (only allowed on gen6) */ /* The instruction has an embedded compare (only allowed on gen6) */
assert(intel->gen == 6); assert(brw->gen == 6);
gen6_IF(p, inst->conditional_mod, src[0], src[1]); gen6_IF(p, inst->conditional_mod, src[0], src[1]);
} else { } else {
struct brw_instruction *brw_inst = brw_IF(p, BRW_EXECUTE_8); struct brw_instruction *brw_inst = brw_IF(p, BRW_EXECUTE_8);
@ -782,7 +782,7 @@ vec4_generator::generate_vec4_instruction(vec4_instruction *instruction,
break; break;
case BRW_OPCODE_CONTINUE: case BRW_OPCODE_CONTINUE:
/* FINISHME: We need to write the loop instruction support still. */ /* FINISHME: We need to write the loop instruction support still. */
if (intel->gen >= 6) if (brw->gen >= 6)
gen6_CONT(p); gen6_CONT(p);
else else
brw_CONT(p); brw_CONT(p);
@ -800,7 +800,7 @@ vec4_generator::generate_vec4_instruction(vec4_instruction *instruction,
case SHADER_OPCODE_LOG2: case SHADER_OPCODE_LOG2:
case SHADER_OPCODE_SIN: case SHADER_OPCODE_SIN:
case SHADER_OPCODE_COS: case SHADER_OPCODE_COS:
if (intel->gen == 6) { if (brw->gen == 6) {
generate_math1_gen6(inst, dst, src[0]); generate_math1_gen6(inst, dst, src[0]);
} else { } else {
/* Also works for Gen7. */ /* Also works for Gen7. */
@ -811,9 +811,9 @@ vec4_generator::generate_vec4_instruction(vec4_instruction *instruction,
case SHADER_OPCODE_POW: case SHADER_OPCODE_POW:
case SHADER_OPCODE_INT_QUOTIENT: case SHADER_OPCODE_INT_QUOTIENT:
case SHADER_OPCODE_INT_REMAINDER: case SHADER_OPCODE_INT_REMAINDER:
if (intel->gen >= 7) { if (brw->gen >= 7) {
generate_math2_gen7(inst, dst, src[0], src[1]); generate_math2_gen7(inst, dst, src[0], src[1]);
} else if (intel->gen == 6) { } else if (brw->gen == 6) {
generate_math2_gen6(inst, dst, src[0], src[1]); generate_math2_gen6(inst, dst, src[0], src[1]);
} else { } else {
generate_math2_gen4(inst, dst, src[0], src[1]); generate_math2_gen4(inst, dst, src[0], src[1]);

View File

@ -101,8 +101,6 @@ brw_alloc_reg_set_for_classes(struct brw_context *brw,
int class_count, int class_count,
int base_reg_count) int base_reg_count)
{ {
struct intel_context *intel = &brw->intel;
/* Compute the total number of registers across all classes. */ /* Compute the total number of registers across all classes. */
int ra_reg_count = 0; int ra_reg_count = 0;
for (int i = 0; i < class_count; i++) { for (int i = 0; i < class_count; i++) {
@ -113,7 +111,7 @@ brw_alloc_reg_set_for_classes(struct brw_context *brw,
brw->vs.ra_reg_to_grf = ralloc_array(brw, uint8_t, ra_reg_count); brw->vs.ra_reg_to_grf = ralloc_array(brw, uint8_t, ra_reg_count);
ralloc_free(brw->vs.regs); ralloc_free(brw->vs.regs);
brw->vs.regs = ra_alloc_reg_set(brw, ra_reg_count); brw->vs.regs = ra_alloc_reg_set(brw, ra_reg_count);
if (intel->gen >= 6) if (brw->gen >= 6)
ra_set_allocate_round_robin(brw->vs.regs); ra_set_allocate_round_robin(brw->vs.regs);
ralloc_free(brw->vs.classes); ralloc_free(brw->vs.classes);
brw->vs.classes = ralloc_array(brw, int, class_count + 1); brw->vs.classes = ralloc_array(brw, int, class_count + 1);

View File

@ -160,7 +160,7 @@ vec4_visitor::IF(uint32_t predicate)
vec4_instruction * vec4_instruction *
vec4_visitor::IF(src_reg src0, src_reg src1, uint32_t condition) vec4_visitor::IF(src_reg src0, src_reg src1, uint32_t condition)
{ {
assert(intel->gen >= 6); assert(brw->gen >= 6);
vec4_instruction *inst; vec4_instruction *inst;
@ -188,7 +188,7 @@ vec4_visitor::CMP(dst_reg dst, src_reg src0, src_reg src1, uint32_t condition)
* before comparison, producing garbage results for floating * before comparison, producing garbage results for floating
* point comparisons. * point comparisons.
*/ */
if (intel->gen == 4) { if (brw->gen == 4) {
dst.type = src0.type; dst.type = src0.type;
if (dst.file == HW_REG) if (dst.file == HW_REG)
dst.fixed_hw_reg.type = dst.type; dst.fixed_hw_reg.type = dst.type;
@ -276,7 +276,7 @@ vec4_visitor::fix_math_operand(src_reg src)
* can't use. * can't use.
*/ */
if (intel->gen == 7 && src.file != IMM) if (brw->gen == 7 && src.file != IMM)
return src; return src;
dst_reg expanded = dst_reg(this, glsl_type::vec4_type); dst_reg expanded = dst_reg(this, glsl_type::vec4_type);
@ -329,7 +329,7 @@ vec4_visitor::emit_math(opcode opcode, dst_reg dst, src_reg src)
return; return;
} }
if (intel->gen >= 6) { if (brw->gen >= 6) {
return emit_math1_gen6(opcode, dst, src); return emit_math1_gen6(opcode, dst, src);
} else { } else {
return emit_math1_gen4(opcode, dst, src); return emit_math1_gen4(opcode, dst, src);
@ -381,7 +381,7 @@ vec4_visitor::emit_math(enum opcode opcode,
return; return;
} }
if (intel->gen >= 6) { if (brw->gen >= 6) {
return emit_math2_gen6(opcode, dst, src0, src1); return emit_math2_gen6(opcode, dst, src0, src1);
} else { } else {
return emit_math2_gen4(opcode, dst, src0, src1); return emit_math2_gen4(opcode, dst, src0, src1);
@ -391,7 +391,7 @@ vec4_visitor::emit_math(enum opcode opcode,
void void
vec4_visitor::emit_pack_half_2x16(dst_reg dst, src_reg src0) vec4_visitor::emit_pack_half_2x16(dst_reg dst, src_reg src0)
{ {
if (intel->gen < 7) if (brw->gen < 7)
assert(!"ir_unop_pack_half_2x16 should be lowered"); assert(!"ir_unop_pack_half_2x16 should be lowered");
assert(dst.type == BRW_REGISTER_TYPE_UD); assert(dst.type == BRW_REGISTER_TYPE_UD);
@ -467,7 +467,7 @@ vec4_visitor::emit_pack_half_2x16(dst_reg dst, src_reg src0)
void void
vec4_visitor::emit_unpack_half_2x16(dst_reg dst, src_reg src0) vec4_visitor::emit_unpack_half_2x16(dst_reg dst, src_reg src0)
{ {
if (intel->gen < 7) if (brw->gen < 7)
assert(!"ir_unop_unpack_half_2x16 should be lowered"); assert(!"ir_unop_unpack_half_2x16 should be lowered");
assert(dst.type == BRW_REGISTER_TYPE_F); assert(dst.type == BRW_REGISTER_TYPE_F);
@ -662,7 +662,7 @@ vec4_visitor::setup_uniform_clipplane_values()
{ {
gl_clip_plane *clip_planes = brw_select_clip_planes(ctx); gl_clip_plane *clip_planes = brw_select_clip_planes(ctx);
if (intel->gen < 6) { if (brw->gen < 6) {
/* Pre-Gen6, we compact clip planes. For example, if the user /* Pre-Gen6, we compact clip planes. For example, if the user
* enables just clip planes 0, 1, and 3, we will enable clip planes * enables just clip planes 0, 1, and 3, we will enable clip planes
* 0, 1, and 2 in the hardware, and we'll move clip plane 3 to clip * 0, 1, and 2 in the hardware, and we'll move clip plane 3 to clip
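The comment above is cut short by the hunk, but the compaction it describes is mechanical: enabled clip planes are packed into consecutive hardware slots so no slot is wasted on a disabled plane. A minimal standalone sketch of that rule (helper name and signature are ours, not the driver's):

    #include <stdint.h>

    /* Pack enabled clip planes (bitmask) into consecutive hardware slots.
     * With planes 0, 1, and 3 enabled, slots 0..2 hold planes 0, 1, and 3,
     * matching the example in the comment above. */
    static int
    compact_clip_planes(uint32_t enabled_mask, int slot_for_plane[8])
    {
       int next_slot = 0;
       for (int plane = 0; plane < 8; plane++) {
          if (enabled_mask & (1u << plane))
             slot_for_plane[plane] = next_slot++;   /* next free slot */
          else
             slot_for_plane[plane] = -1;            /* plane disabled */
       }
       return next_slot;   /* hardware slots actually in use */
    }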
@ -783,7 +783,7 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate)
break; break;
case ir_unop_f2b: case ir_unop_f2b:
if (intel->gen >= 6) { if (brw->gen >= 6) {
emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ)); emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
} else { } else {
inst = emit(MOV(dst_null_f(), op[0])); inst = emit(MOV(dst_null_f(), op[0]));
@ -792,7 +792,7 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate)
break; break;
case ir_unop_i2b: case ir_unop_i2b:
if (intel->gen >= 6) { if (brw->gen >= 6) {
emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
} else { } else {
inst = emit(MOV(dst_null_d(), op[0])); inst = emit(MOV(dst_null_d(), op[0]));
@ -836,7 +836,7 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate)
resolve_ud_negate(&this->result); resolve_ud_negate(&this->result);
if (intel->gen >= 6) { if (brw->gen >= 6) {
vec4_instruction *inst = emit(AND(dst_null_d(), vec4_instruction *inst = emit(AND(dst_null_d(),
this->result, src_reg(1))); this->result, src_reg(1)));
inst->conditional_mod = BRW_CONDITIONAL_NZ; inst->conditional_mod = BRW_CONDITIONAL_NZ;
@ -1254,7 +1254,7 @@ bool
vec4_visitor::try_emit_mad(ir_expression *ir, int mul_arg) vec4_visitor::try_emit_mad(ir_expression *ir, int mul_arg)
{ {
/* 3-src instructions were introduced in gen6. */ /* 3-src instructions were introduced in gen6. */
if (intel->gen < 6) if (brw->gen < 6)
return false; return false;
/* MAD can only handle floating-point data. */ /* MAD can only handle floating-point data. */
@ -1287,7 +1287,7 @@ vec4_visitor::emit_bool_comparison(unsigned int op,
dst_reg dst, src_reg src0, src_reg src1) dst_reg dst, src_reg src0, src_reg src1)
{ {
/* original gen4 does destination conversion before comparison. */ /* original gen4 does destination conversion before comparison. */
if (intel->gen < 5) if (brw->gen < 5)
dst.type = src0.type; dst.type = src0.type;
emit(CMP(dst, src0, src1, brw_conditional_for_comparison(op))); emit(CMP(dst, src0, src1, brw_conditional_for_comparison(op)));
@ -1302,7 +1302,7 @@ vec4_visitor::emit_minmax(uint32_t conditionalmod, dst_reg dst,
{ {
vec4_instruction *inst; vec4_instruction *inst;
if (intel->gen >= 6) { if (brw->gen >= 6) {
inst = emit(BRW_OPCODE_SEL, dst, src0, src1); inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
inst->conditional_mod = conditionalmod; inst->conditional_mod = conditionalmod;
} else { } else {
@ -1493,12 +1493,12 @@ vec4_visitor::visit(ir_expression *ir)
* 16 bits, though, we can just emit a single MUL. * 16 bits, though, we can just emit a single MUL.
*/ */
if (is_16bit_constant(ir->operands[0])) { if (is_16bit_constant(ir->operands[0])) {
if (intel->gen < 7) if (brw->gen < 7)
emit(MUL(result_dst, op[0], op[1])); emit(MUL(result_dst, op[0], op[1]));
else else
emit(MUL(result_dst, op[1], op[0])); emit(MUL(result_dst, op[1], op[0]));
} else if (is_16bit_constant(ir->operands[1])) { } else if (is_16bit_constant(ir->operands[1])) {
if (intel->gen < 7) if (brw->gen < 7)
emit(MUL(result_dst, op[1], op[0])); emit(MUL(result_dst, op[1], op[0]));
else else
emit(MUL(result_dst, op[0], op[1])); emit(MUL(result_dst, op[0], op[1]));
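The operand swaps above track which source the integer multiplier truncates: MUL reads only the low 16 bits of one operand (src0 before Gen7, src1 on Gen7+), so a constant that fits in 16 bits must land in that slot for a single MUL, with no MACH for the high half, to be correct. The is_16bit_constant() guard is defined elsewhere in this file; its core test amounts to the following sketch (not the driver's code):

    #include <stdbool.h>
    #include <stdint.h>

    /* True when a constant multiplier fits in 16 bits, so one MUL suffices.
     * The real predicate also verifies the IR node is an int/uint constant;
     * here we assume the value has already been extracted. */
    static bool
    fits_in_16_bits(uint32_t value)
    {
       return value < (1u << 16);
    }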
@ -2355,7 +2355,7 @@ vec4_visitor::visit(ir_texture *ir)
bool use_texture_offset = ir->offset != NULL && ir->op != ir_txf; bool use_texture_offset = ir->offset != NULL && ir->op != ir_txf;
/* Texel offsets go in the message header; Gen4 also requires headers. */ /* Texel offsets go in the message header; Gen4 also requires headers. */
inst->header_present = use_texture_offset || intel->gen < 5; inst->header_present = use_texture_offset || brw->gen < 5;
inst->base_mrf = 2; inst->base_mrf = 2;
inst->mlen = inst->header_present + 1; /* always at least one */ inst->mlen = inst->header_present + 1; /* always at least one */
inst->sampler = sampler; inst->sampler = sampler;
@ -2370,7 +2370,7 @@ vec4_visitor::visit(ir_texture *ir)
int param_base = inst->base_mrf + inst->header_present; int param_base = inst->base_mrf + inst->header_present;
if (ir->op == ir_txs) { if (ir->op == ir_txs) {
int writemask = intel->gen == 4 ? WRITEMASK_W : WRITEMASK_X; int writemask = brw->gen == 4 ? WRITEMASK_W : WRITEMASK_X;
emit(MOV(dst_reg(MRF, param_base, lod_type, writemask), lod)); emit(MOV(dst_reg(MRF, param_base, lod_type, writemask), lod));
} else { } else {
int i, coord_mask = 0, zero_mask = 0; int i, coord_mask = 0, zero_mask = 0;
@ -2416,7 +2416,7 @@ vec4_visitor::visit(ir_texture *ir)
/* Load the LOD info */ /* Load the LOD info */
if (ir->op == ir_tex || ir->op == ir_txl) { if (ir->op == ir_tex || ir->op == ir_txl) {
int mrf, writemask; int mrf, writemask;
if (intel->gen >= 5) { if (brw->gen >= 5) {
mrf = param_base + 1; mrf = param_base + 1;
if (ir->shadow_comparitor) { if (ir->shadow_comparitor) {
writemask = WRITEMASK_Y; writemask = WRITEMASK_Y;
@ -2425,7 +2425,7 @@ vec4_visitor::visit(ir_texture *ir)
writemask = WRITEMASK_X; writemask = WRITEMASK_X;
inst->mlen++; inst->mlen++;
} }
} else /* intel->gen == 4 */ { } else /* brw->gen == 4 */ {
mrf = param_base; mrf = param_base;
writemask = WRITEMASK_Z; writemask = WRITEMASK_Z;
} }
@ -2445,7 +2445,7 @@ vec4_visitor::visit(ir_texture *ir)
} else if (ir->op == ir_txd) { } else if (ir->op == ir_txd) {
const glsl_type *type = lod_type; const glsl_type *type = lod_type;
if (intel->gen >= 5) { if (brw->gen >= 5) {
dPdx.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y); dPdx.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y);
dPdy.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y); dPdy.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y);
emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XZ), dPdx)); emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XZ), dPdx));
@ -2465,7 +2465,7 @@ vec4_visitor::visit(ir_texture *ir)
shadow_comparitor)); shadow_comparitor));
} }
} }
} else /* intel->gen == 4 */ { } else /* brw->gen == 4 */ {
emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XYZ), dPdx)); emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XYZ), dPdx));
emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_XYZ), dPdy)); emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_XYZ), dPdy));
inst->mlen += 2; inst->mlen += 2;
@ -2560,7 +2560,7 @@ vec4_visitor::visit(ir_if *ir)
*/ */
this->base_ir = ir->condition; this->base_ir = ir->condition;
if (intel->gen == 6) { if (brw->gen == 6) {
emit_if_gen6(ir); emit_if_gen6(ir);
} else { } else {
uint32_t predicate; uint32_t predicate;
@ -2607,7 +2607,7 @@ vec4_visitor::emit_ndc_computation()
void void
vec4_visitor::emit_psiz_and_flags(struct brw_reg reg) vec4_visitor::emit_psiz_and_flags(struct brw_reg reg)
{ {
if (intel->gen < 6 && if (brw->gen < 6 &&
((prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) || ((prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) ||
key->userclip_active || brw->has_negative_rhw_bug)) { key->userclip_active || brw->has_negative_rhw_bug)) {
dst_reg header1 = dst_reg(this, glsl_type::uvec4_type); dst_reg header1 = dst_reg(this, glsl_type::uvec4_type);
@ -2660,7 +2660,7 @@ vec4_visitor::emit_psiz_and_flags(struct brw_reg reg)
} }
emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), src_reg(header1))); emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), src_reg(header1)));
} else if (intel->gen < 6) { } else if (brw->gen < 6) {
emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u)); emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u));
} else { } else {
emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0))); emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0)));
@ -2678,7 +2678,7 @@ vec4_visitor::emit_psiz_and_flags(struct brw_reg reg)
void void
vec4_visitor::emit_clip_distances(struct brw_reg reg, int offset) vec4_visitor::emit_clip_distances(struct brw_reg reg, int offset)
{ {
if (intel->gen < 6) { if (brw->gen < 6) {
/* Clip distance slots are set aside in gen5, but they are not used. It /* Clip distance slots are set aside in gen5, but they are not used. It
* is not clear whether we actually need to set aside space for them, * is not clear whether we actually need to set aside space for them,
* but the performance cost is negligible. * but the performance cost is negligible.
@ -2782,9 +2782,7 @@ vec4_visitor::emit_urb_slot(int mrf, int varying)
static int static int
align_interleaved_urb_mlen(struct brw_context *brw, int mlen) align_interleaved_urb_mlen(struct brw_context *brw, int mlen)
{ {
struct intel_context *intel = &brw->intel; if (brw->gen >= 6) {
if (intel->gen >= 6) {
/* URB data written (does not include the message header reg) must /* URB data written (does not include the message header reg) must
* be a multiple of 256 bits, or 2 VS registers. See vol5c.5, * be a multiple of 256 bits, or 2 VS registers. See vol5c.5,
* section 5.4.3.2.2: URB_INTERLEAVED. * section 5.4.3.2.2: URB_INTERLEAVED.
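Concretely, mlen counts one message header register plus the URB data, and on Gen6+ the data portion must round up to two-register (256-bit) units. A standalone restatement of the rule in the comment (names are ours):

    /* Pad mlen so the data part (mlen minus the header register) is a
     * multiple of 2 registers = 256 bits.  Illustrative only. */
    static int
    align_interleaved_urb_mlen_sketch(int gen, int mlen)
    {
       if (gen >= 6 && (mlen - 1) % 2 != 0)
          mlen++;   /* round data up to an even register count */
       return mlen;
    }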
@ -2855,7 +2853,7 @@ vec4_visitor::emit_vertex()
*/ */
emit_urb_write_header(mrf++); emit_urb_write_header(mrf++);
if (intel->gen < 6) { if (brw->gen < 6) {
emit_ndc_computation(); emit_ndc_computation();
} }
@ -2924,7 +2922,7 @@ vec4_visitor::get_scratch_offset(vec4_instruction *inst,
/* Pre-gen6, the message header uses byte offsets instead of vec4 /* Pre-gen6, the message header uses byte offsets instead of vec4
* (16-byte) offset units. * (16-byte) offset units.
*/ */
if (intel->gen < 6) if (brw->gen < 6)
message_header_scale *= 16; message_header_scale *= 16;
if (reladdr) { if (reladdr) {
@ -2952,13 +2950,13 @@ vec4_visitor::get_pull_constant_offset(vec4_instruction *inst,
/* Pre-gen6, the message header uses byte offsets instead of vec4 /* Pre-gen6, the message header uses byte offsets instead of vec4
* (16-byte) offset units. * (16-byte) offset units.
*/ */
if (intel->gen < 6) { if (brw->gen < 6) {
emit_before(inst, MUL(dst_reg(index), index, src_reg(16))); emit_before(inst, MUL(dst_reg(index), index, src_reg(16)));
} }
return index; return index;
} else { } else {
int message_header_scale = intel->gen < 6 ? 16 : 1; int message_header_scale = brw->gen < 6 ? 16 : 1;
return src_reg(reg_offset * message_header_scale); return src_reg(reg_offset * message_header_scale);
} }
} }
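Both scratch and pull-constant reads apply the same unit conversion: pre-Gen6 message headers address the buffer in bytes, Gen6+ in vec4 (16-byte) slots. As a worked example of the scale rule above:

    /* reg_offset 5 -> 80 on Gen4/5 (bytes), 5 on Gen6+ (vec4 slots). */
    static int
    scratch_offset_units(int gen, int reg_offset)
    {
       int message_header_scale = (gen < 6) ? 16 : 1;
       return reg_offset * message_header_scale;
    }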
@ -3111,7 +3109,7 @@ vec4_visitor::emit_pull_constant_load(vec4_instruction *inst,
src_reg offset = get_pull_constant_offset(inst, orig_src.reladdr, reg_offset); src_reg offset = get_pull_constant_offset(inst, orig_src.reladdr, reg_offset);
vec4_instruction *load; vec4_instruction *load;
if (intel->gen >= 7) { if (brw->gen >= 7) {
dst_reg grf_offset = dst_reg(this, glsl_type::int_type); dst_reg grf_offset = dst_reg(this, glsl_type::int_type);
grf_offset.type = offset.type; grf_offset.type = offset.type;
emit_before(inst, MOV(grf_offset, offset)); emit_before(inst, MOV(grf_offset, offset));
@ -3256,7 +3254,7 @@ vec4_visitor::vec4_visitor(struct brw_context *brw,
this->virtual_grf_array_size = 0; this->virtual_grf_array_size = 0;
this->live_intervals_valid = false; this->live_intervals_valid = false;
this->max_grf = intel->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF; this->max_grf = brw->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
this->uniforms = 0; this->uniforms = 0;
} }
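The max_grf line encodes a Gen7 quirk: Gen7 dropped the separate MRF file, so the driver assembles message payloads in the top of the GRF space instead, and the register allocator must stay below that region. A sketch with assumed values (the real constants live in the driver's headers):

    enum {
       BRW_MAX_GRF         = 128,   /* assumed: size of the GRF file */
       GEN7_MRF_HACK_START = 112    /* assumed: GRFs used as fake MRFs */
    };

    /* Registers available to the allocator: the whole file on Gen4-6,
     * only the GRFs below the fake-MRF region on Gen7+. */
    static int
    usable_grf_count(int gen)
    {
       return gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
    }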


@ -111,7 +111,7 @@ vec4_vs_visitor::emit_program_code()
break; break;
case OPCODE_ARL: case OPCODE_ARL:
if (intel->gen >= 6) { if (brw->gen >= 6) {
dst.writemask = WRITEMASK_X; dst.writemask = WRITEMASK_X;
dst_reg dst_f = dst; dst_reg dst_f = dst;
dst_f.type = BRW_REGISTER_TYPE_F; dst_f.type = BRW_REGISTER_TYPE_F;
@ -547,7 +547,7 @@ vec4_vs_visitor::get_vp_src_reg(const prog_src_register &src)
dst_reladdr.writemask = WRITEMASK_X; dst_reladdr.writemask = WRITEMASK_X;
emit(ADD(dst_reladdr, this->vp_addr_reg, src_reg(src.Index))); emit(ADD(dst_reladdr, this->vp_addr_reg, src_reg(src.Index)));
if (intel->gen < 6) if (brw->gen < 6)
emit(MUL(dst_reladdr, reladdr, src_reg(16))); emit(MUL(dst_reladdr, reladdr, src_reg(16)));
#if 0 #if 0


@ -61,8 +61,6 @@ void
brw_compute_vue_map(struct brw_context *brw, struct brw_vue_map *vue_map, brw_compute_vue_map(struct brw_context *brw, struct brw_vue_map *vue_map,
GLbitfield64 slots_valid, bool userclip_active) GLbitfield64 slots_valid, bool userclip_active)
{ {
const struct intel_context *intel = &brw->intel;
vue_map->slots_valid = slots_valid; vue_map->slots_valid = slots_valid;
int i; int i;
@ -83,7 +81,7 @@ brw_compute_vue_map(struct brw_context *brw, struct brw_vue_map *vue_map,
/* VUE header: format depends on chip generation and whether clipping is /* VUE header: format depends on chip generation and whether clipping is
* enabled. * enabled.
*/ */
switch (intel->gen) { switch (brw->gen) {
case 4: case 4:
case 5: case 5:
/* There are 8 dwords in VUE header pre-Ironlake: /* There are 8 dwords in VUE header pre-Ironlake:
@ -220,7 +218,6 @@ do_vs_prog(struct brw_context *brw,
struct brw_vertex_program *vp, struct brw_vertex_program *vp,
struct brw_vs_prog_key *key) struct brw_vs_prog_key *key)
{ {
struct intel_context *intel = &brw->intel;
GLuint program_size; GLuint program_size;
const GLuint *program; const GLuint *program;
struct brw_vs_compile c; struct brw_vs_compile c;
@ -269,7 +266,7 @@ do_vs_prog(struct brw_context *brw,
prog_data.inputs_read |= VERT_BIT_EDGEFLAG; prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
} }
if (intel->gen < 6) { if (brw->gen < 6) {
/* Put dummy slots into the VUE for the SF to put the replaced /* Put dummy slots into the VUE for the SF to put the replaced
* point sprite coords in. We shouldn't need these dummy slots, * point sprite coords in. We shouldn't need these dummy slots,
* which take up precious URB space, but it would mean that the SF * which take up precious URB space, but it would mean that the SF
@ -406,8 +403,7 @@ brw_vs_debug_recompile(struct brw_context *brw,
static void brw_upload_vs_prog(struct brw_context *brw) static void brw_upload_vs_prog(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx;
struct gl_context *ctx = &intel->ctx;
struct brw_vs_prog_key key; struct brw_vs_prog_key key;
/* BRW_NEW_VERTEX_PROGRAM */ /* BRW_NEW_VERTEX_PROGRAM */
struct brw_vertex_program *vp = struct brw_vertex_program *vp =
@ -424,7 +420,7 @@ static void brw_upload_vs_prog(struct brw_context *brw)
key.base.userclip_active = (ctx->Transform.ClipPlanesEnabled != 0); key.base.userclip_active = (ctx->Transform.ClipPlanesEnabled != 0);
key.base.uses_clip_distance = vp->program.UsesClipDistance; key.base.uses_clip_distance = vp->program.UsesClipDistance;
if (key.base.userclip_active && !key.base.uses_clip_distance) { if (key.base.userclip_active && !key.base.uses_clip_distance) {
if (intel->gen < 6) { if (brw->gen < 6) {
key.base.nr_userclip_plane_consts key.base.nr_userclip_plane_consts
= _mesa_bitcount_64(ctx->Transform.ClipPlanesEnabled); = _mesa_bitcount_64(ctx->Transform.ClipPlanesEnabled);
key.base.userclip_planes_enabled_gen_4_5 key.base.userclip_planes_enabled_gen_4_5
@ -436,7 +432,7 @@ static void brw_upload_vs_prog(struct brw_context *brw)
} }
/* _NEW_POLYGON */ /* _NEW_POLYGON */
if (intel->gen < 6) { if (brw->gen < 6) {
key.copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL || key.copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL ||
ctx->Polygon.BackMode != GL_FILL); ctx->Polygon.BackMode != GL_FILL);
} }
@ -445,7 +441,7 @@ static void brw_upload_vs_prog(struct brw_context *brw)
key.base.clamp_vertex_color = ctx->Light._ClampVertexColor; key.base.clamp_vertex_color = ctx->Light._ClampVertexColor;
/* _NEW_POINT */ /* _NEW_POINT */
if (intel->gen < 6 && ctx->Point.PointSprite) { if (brw->gen < 6 && ctx->Point.PointSprite) {
for (i = 0; i < 8; i++) { for (i = 0; i < 8; i++) {
if (ctx->Point.CoordReplace[i]) if (ctx->Point.CoordReplace[i])
key.point_coord_replace |= (1 << i); key.point_coord_replace |= (1 << i);
@ -456,7 +452,7 @@ static void brw_upload_vs_prog(struct brw_context *brw)
brw_populate_sampler_prog_key_data(ctx, prog, &key.base.tex); brw_populate_sampler_prog_key_data(ctx, prog, &key.base.tex);
/* BRW_NEW_VERTICES */ /* BRW_NEW_VERTICES */
if (intel->gen < 8 && !brw->is_haswell) { if (brw->gen < 8 && !brw->is_haswell) {
/* Prior to Haswell, the hardware can't natively support GL_FIXED or /* Prior to Haswell, the hardware can't natively support GL_FIXED or
* 2_10_10_10_REV vertex formats. Set appropriate workaround flags. * 2_10_10_10_REV vertex formats. Set appropriate workaround flags.
*/ */


@ -39,7 +39,6 @@
static void static void
brw_upload_vs_unit(struct brw_context *brw) brw_upload_vs_unit(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
struct brw_vs_unit_state *vs; struct brw_vs_unit_state *vs;
vs = brw_state_batch(brw, AUB_TRACE_VS_STATE, vs = brw_state_batch(brw, AUB_TRACE_VS_STATE,
@ -69,7 +68,7 @@ brw_upload_vs_unit(struct brw_context *brw)
* The most notable and reliably failing application is the Humus * The most notable and reliably failing application is the Humus
* demo "CelShading" * demo "CelShading"
*/ */
vs->thread1.single_program_flow = (intel->gen == 5); vs->thread1.single_program_flow = (brw->gen == 5);
vs->thread1.binding_table_entry_count = 0; vs->thread1.binding_table_entry_count = 0;
@ -93,7 +92,7 @@ brw_upload_vs_unit(struct brw_context *brw)
vs->thread3.const_urb_entry_read_offset = brw->curbe.vs_start * 2; vs->thread3.const_urb_entry_read_offset = brw->curbe.vs_start * 2;
/* BRW_NEW_URB_FENCE */ /* BRW_NEW_URB_FENCE */
if (intel->gen == 5) { if (brw->gen == 5) {
switch (brw->urb.nr_vs_entries) { switch (brw->urb.nr_vs_entries) {
case 8: case 8:
case 12: case 12:


@ -158,11 +158,11 @@ void brwInitVtbl( struct brw_context *brw )
brw->vtbl.finish_batch = brw_finish_batch; brw->vtbl.finish_batch = brw_finish_batch;
brw->vtbl.destroy = brw_destroy_context; brw->vtbl.destroy = brw_destroy_context;
assert(brw->intel.gen >= 4); assert(brw->gen >= 4);
if (brw->intel.gen >= 7) { if (brw->gen >= 7) {
gen7_init_vtable_surface_functions(brw); gen7_init_vtable_surface_functions(brw);
brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz; brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
} else if (brw->intel.gen >= 4) { } else if (brw->gen >= 4) {
gen4_init_vtable_surface_functions(brw); gen4_init_vtable_surface_functions(brw);
brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz; brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
} }


@ -349,7 +349,6 @@ static void brw_wm_populate_key( struct brw_context *brw,
struct brw_wm_prog_key *key ) struct brw_wm_prog_key *key )
{ {
struct gl_context *ctx = &brw->intel.ctx; struct gl_context *ctx = &brw->intel.ctx;
struct intel_context *intel = &brw->intel;
/* BRW_NEW_FRAGMENT_PROGRAM */ /* BRW_NEW_FRAGMENT_PROGRAM */
const struct brw_fragment_program *fp = const struct brw_fragment_program *fp =
(struct brw_fragment_program *)brw->fragment_program; (struct brw_fragment_program *)brw->fragment_program;
@ -362,7 +361,7 @@ static void brw_wm_populate_key( struct brw_context *brw,
/* Build the index for table lookup /* Build the index for table lookup
*/ */
if (intel->gen < 6) { if (brw->gen < 6) {
/* _NEW_COLOR */ /* _NEW_COLOR */
if (fp->program.UsesKill || ctx->Color.AlphaEnabled) if (fp->program.UsesKill || ctx->Color.AlphaEnabled)
lookup |= IZ_PS_KILL_ALPHATEST_BIT; lookup |= IZ_PS_KILL_ALPHATEST_BIT;
@ -416,7 +415,7 @@ static void brw_wm_populate_key( struct brw_context *brw,
key->line_aa = line_aa; key->line_aa = line_aa;
if (intel->gen < 6) if (brw->gen < 6)
key->stats_wm = brw->stats_wm; key->stats_wm = brw->stats_wm;
/* _NEW_LIGHT */ /* _NEW_LIGHT */
@ -465,7 +464,7 @@ static void brw_wm_populate_key( struct brw_context *brw,
(ctx->Multisample.SampleAlphaToCoverage || ctx->Color.AlphaEnabled); (ctx->Multisample.SampleAlphaToCoverage || ctx->Color.AlphaEnabled);
/* BRW_NEW_VUE_MAP_GEOM_OUT */ /* BRW_NEW_VUE_MAP_GEOM_OUT */
if (intel->gen < 6) if (brw->gen < 6)
key->input_slots_valid = brw->vue_map_geom_out.slots_valid; key->input_slots_valid = brw->vue_map_geom_out.slots_valid;
/* The unique fragment program ID */ /* The unique fragment program ID */


@ -139,7 +139,7 @@ upload_default_color(struct brw_context *brw, struct gl_sampler_object *sampler,
if (firstImage->_BaseFormat == GL_RGB) if (firstImage->_BaseFormat == GL_RGB)
color[3] = 1.0; color[3] = 1.0;
if (intel->gen == 5 || intel->gen == 6) { if (brw->gen == 5 || brw->gen == 6) {
struct gen5_sampler_default_color *sdc; struct gen5_sampler_default_color *sdc;
sdc = brw_state_batch(brw, AUB_TRACE_SAMPLER_DEFAULT_COLOR, sdc = brw_state_batch(brw, AUB_TRACE_SAMPLER_DEFAULT_COLOR,
@ -268,7 +268,7 @@ static void brw_update_sampler_state(struct brw_context *brw,
sampler->ss1.t_wrap_mode = translate_wrap_mode(gl_sampler->WrapT, sampler->ss1.t_wrap_mode = translate_wrap_mode(gl_sampler->WrapT,
using_nearest); using_nearest);
if (intel->gen >= 6 && if (brw->gen >= 6 &&
sampler->ss0.min_filter != sampler->ss0.mag_filter) sampler->ss0.min_filter != sampler->ss0.mag_filter)
sampler->ss0.min_mag_neq = 1; sampler->ss0.min_mag_neq = 1;
@ -332,13 +332,13 @@ static void brw_update_sampler_state(struct brw_context *brw,
/* On Gen6+, the sampler can handle non-normalized texture /* On Gen6+, the sampler can handle non-normalized texture
* rectangle coordinates natively * rectangle coordinates natively
*/ */
if (intel->gen >= 6 && texObj->Target == GL_TEXTURE_RECTANGLE) { if (brw->gen >= 6 && texObj->Target == GL_TEXTURE_RECTANGLE) {
sampler->ss3.non_normalized_coord = 1; sampler->ss3.non_normalized_coord = 1;
} }
upload_default_color(brw, gl_sampler, unit, ss_index); upload_default_color(brw, gl_sampler, unit, ss_index);
if (intel->gen >= 6) { if (brw->gen >= 6) {
sampler->ss2.default_color_pointer = brw->wm.sdc_offset[ss_index] >> 5; sampler->ss2.default_color_pointer = brw->wm.sdc_offset[ss_index] >> 5;
} else { } else {
/* reloc */ /* reloc */


@ -133,7 +133,7 @@ brw_upload_wm_unit(struct brw_context *brw)
/* BRW_NEW_CURBE_OFFSETS */ /* BRW_NEW_CURBE_OFFSETS */
wm->thread3.const_urb_entry_read_offset = brw->curbe.wm_start * 2; wm->thread3.const_urb_entry_read_offset = brw->curbe.wm_start * 2;
if (intel->gen == 5) if (brw->gen == 5)
wm->wm4.sampler_count = 0; /* hardware requirement */ wm->wm4.sampler_count = 0; /* hardware requirement */
else { else {
/* CACHE_NEW_SAMPLER */ /* CACHE_NEW_SAMPLER */


@ -198,7 +198,6 @@ brw_update_buffer_texture_surface(struct gl_context *ctx,
unsigned surf_index) unsigned surf_index)
{ {
struct brw_context *brw = brw_context(ctx); struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = &brw->intel;
struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current; struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
uint32_t *surf; uint32_t *surf;
struct intel_buffer_object *intel_obj = struct intel_buffer_object *intel_obj =
@ -219,7 +218,7 @@ brw_update_buffer_texture_surface(struct gl_context *ctx,
surf[0] = (BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT | surf[0] = (BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
(brw_format_for_mesa_format(format) << BRW_SURFACE_FORMAT_SHIFT)); (brw_format_for_mesa_format(format) << BRW_SURFACE_FORMAT_SHIFT));
if (intel->gen >= 6) if (brw->gen >= 6)
surf[0] |= BRW_SURFACE_RC_READ_WRITE; surf[0] |= BRW_SURFACE_RC_READ_WRITE;
if (bo) { if (bo) {
@ -322,7 +321,6 @@ brw_create_constant_surface(struct brw_context *brw,
uint32_t *out_offset, uint32_t *out_offset,
bool dword_pitch) bool dword_pitch)
{ {
struct intel_context *intel = &brw->intel;
uint32_t stride = dword_pitch ? 4 : 16; uint32_t stride = dword_pitch ? 4 : 16;
uint32_t elements = ALIGN(size, stride) / stride; uint32_t elements = ALIGN(size, stride) / stride;
const GLint w = elements - 1; const GLint w = elements - 1;
@ -335,7 +333,7 @@ brw_create_constant_surface(struct brw_context *brw,
BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT | BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_SURFACE_FORMAT_SHIFT); BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_SURFACE_FORMAT_SHIFT);
if (intel->gen >= 6) if (brw->gen >= 6)
surf[0] |= BRW_SURFACE_RC_READ_WRITE; surf[0] |= BRW_SURFACE_RC_READ_WRITE;
surf[1] = bo->offset + offset; /* reloc */ surf[1] = bo->offset + offset; /* reloc */
@ -371,7 +369,6 @@ brw_update_sol_surface(struct brw_context *brw,
uint32_t *out_offset, unsigned num_vector_components, uint32_t *out_offset, unsigned num_vector_components,
unsigned stride_dwords, unsigned offset_dwords) unsigned stride_dwords, unsigned offset_dwords)
{ {
struct intel_context *intel = &brw->intel;
struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj); struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo, INTEL_WRITE_PART); drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo, INTEL_WRITE_PART);
uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
@ -455,7 +452,6 @@ static void
brw_upload_wm_pull_constants(struct brw_context *brw) brw_upload_wm_pull_constants(struct brw_context *brw)
{ {
struct gl_context *ctx = &brw->intel.ctx; struct gl_context *ctx = &brw->intel.ctx;
struct intel_context *intel = &brw->intel;
/* BRW_NEW_FRAGMENT_PROGRAM */ /* BRW_NEW_FRAGMENT_PROGRAM */
struct brw_fragment_program *fp = struct brw_fragment_program *fp =
(struct brw_fragment_program *) brw->fragment_program; (struct brw_fragment_program *) brw->fragment_program;
@ -527,8 +523,7 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
* *
* - Surface Format must be R8G8B8A8_UNORM. * - Surface Format must be R8G8B8A8_UNORM.
*/ */
struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx;
struct gl_context *ctx = &intel->ctx;
uint32_t *surf; uint32_t *surf;
unsigned surface_type = BRW_SURFACE_NULL; unsigned surface_type = BRW_SURFACE_NULL;
drm_intel_bo *bo = NULL; drm_intel_bo *bo = NULL;
@ -570,7 +565,7 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT | surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT); BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
if (intel->gen < 6) { if (brw->gen < 6) {
surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT | surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT | 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT | 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
@ -609,8 +604,7 @@ brw_update_renderbuffer_surface(struct brw_context *brw,
bool layered, bool layered,
unsigned int unit) unsigned int unit)
{ {
struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx;
struct gl_context *ctx = &intel->ctx;
struct intel_renderbuffer *irb = intel_renderbuffer(rb); struct intel_renderbuffer *irb = intel_renderbuffer(rb);
struct intel_mipmap_tree *mt = irb->mt; struct intel_mipmap_tree *mt = irb->mt;
struct intel_region *region; struct intel_region *region;
@ -675,7 +669,7 @@ brw_update_renderbuffer_surface(struct brw_context *brw,
(tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT | (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
(mt->align_h == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0)); (mt->align_h == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
if (intel->gen < 6) { if (brw->gen < 6) {
/* _NEW_COLOR */ /* _NEW_COLOR */
if (!ctx->Color.ColorLogicOpEnabled && if (!ctx->Color.ColorLogicOpEnabled &&
(ctx->Color.BlendEnabled & (1 << unit))) (ctx->Color.BlendEnabled & (1 << unit)))
@ -756,8 +750,7 @@ const struct brw_tracked_state gen6_renderbuffer_surfaces = {
static void static void
brw_update_texture_surfaces(struct brw_context *brw) brw_update_texture_surfaces(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx;
struct gl_context *ctx = &intel->ctx;
/* BRW_NEW_VERTEX_PROGRAM and BRW_NEW_FRAGMENT_PROGRAM: /* BRW_NEW_VERTEX_PROGRAM and BRW_NEW_FRAGMENT_PROGRAM:
* Unfortunately, we're stuck using the gl_program structs until the * Unfortunately, we're stuck using the gl_program structs until the


@ -106,7 +106,6 @@ void
gen6_blorp_emit_vertices(struct brw_context *brw, gen6_blorp_emit_vertices(struct brw_context *brw,
const brw_blorp_params *params) const brw_blorp_params *params)
{ {
struct intel_context *intel = &brw->intel;
uint32_t vertex_offset; uint32_t vertex_offset;
/* Setup VBO for the rectangle primitive.. /* Setup VBO for the rectangle primitive..
@ -161,7 +160,7 @@ gen6_blorp_emit_vertices(struct brw_context *brw,
uint32_t dw0 = GEN6_VB0_ACCESS_VERTEXDATA | uint32_t dw0 = GEN6_VB0_ACCESS_VERTEXDATA |
(GEN6_BLORP_NUM_VUE_ELEMS * sizeof(float)) << BRW_VB0_PITCH_SHIFT; (GEN6_BLORP_NUM_VUE_ELEMS * sizeof(float)) << BRW_VB0_PITCH_SHIFT;
if (intel->gen >= 7) if (brw->gen >= 7)
dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE; dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;
BEGIN_BATCH(batch_length); BEGIN_BATCH(batch_length);
@ -554,9 +553,7 @@ void
gen6_blorp_emit_vs_disable(struct brw_context *brw, gen6_blorp_emit_vs_disable(struct brw_context *brw,
const brw_blorp_params *params) const brw_blorp_params *params)
{ {
struct intel_context *intel = &brw->intel; if (brw->gen == 6) {
if (intel->gen == 6) {
/* From the BSpec, Volume 2a, Part 3 "Vertex Shader", Section /* From the BSpec, Volume 2a, Part 3 "Vertex Shader", Section
* 3DSTATE_VS, Dword 5.0 "VS Function Enable": * 3DSTATE_VS, Dword 5.0 "VS Function Enable":
* *
@ -816,8 +813,7 @@ static void
gen6_blorp_emit_depth_stencil_config(struct brw_context *brw, gen6_blorp_emit_depth_stencil_config(struct brw_context *brw,
const brw_blorp_params *params) const brw_blorp_params *params)
{ {
struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx;
struct gl_context *ctx = &intel->ctx;
uint32_t draw_x = params->depth.x_offset; uint32_t draw_x = params->depth.x_offset;
uint32_t draw_y = params->depth.y_offset; uint32_t draw_y = params->depth.y_offset;
uint32_t tile_mask_x, tile_mask_y; uint32_t tile_mask_x, tile_mask_y;


@ -39,7 +39,6 @@ static void
gen6_upload_blend_state(struct brw_context *brw) gen6_upload_blend_state(struct brw_context *brw)
{ {
bool is_buffer_zero_integer_format = false; bool is_buffer_zero_integer_format = false;
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &brw->intel.ctx; struct gl_context *ctx = &brw->intel.ctx;
struct gen6_blend_state *blend; struct gen6_blend_state *blend;
int b; int b;
@ -216,7 +215,7 @@ gen6_upload_blend_state(struct brw_context *brw)
blend[b].blend1.alpha_to_one = blend[b].blend1.alpha_to_one =
ctx->Multisample._Enabled && ctx->Multisample.SampleAlphaToOne; ctx->Multisample._Enabled && ctx->Multisample.SampleAlphaToOne;
blend[b].blend1.alpha_to_coverage_dither = (brw->intel.gen >= 7); blend[b].blend1.alpha_to_coverage_dither = (brw->gen >= 7);
} }
else { else {
blend[b].blend1.alpha_to_coverage = false; blend[b].blend1.alpha_to_coverage = false;
@ -225,7 +224,7 @@ gen6_upload_blend_state(struct brw_context *brw)
} }
/* Point the GPU at the new indirect state. */ /* Point the GPU at the new indirect state. */
if (intel->gen == 6) { if (brw->gen == 6) {
BEGIN_BATCH(4); BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2)); OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2));
OUT_BATCH(brw->cc.blend_state_offset | 1); OUT_BATCH(brw->cc.blend_state_offset | 1);
@ -255,7 +254,6 @@ static void
gen6_upload_color_calc_state(struct brw_context *brw) gen6_upload_color_calc_state(struct brw_context *brw)
{ {
struct gl_context *ctx = &brw->intel.ctx; struct gl_context *ctx = &brw->intel.ctx;
struct intel_context *intel = &brw->intel;
struct gen6_color_calc_state *cc; struct gen6_color_calc_state *cc;
cc = brw_state_batch(brw, AUB_TRACE_CC_STATE, cc = brw_state_batch(brw, AUB_TRACE_CC_STATE,
@ -277,7 +275,7 @@ gen6_upload_color_calc_state(struct brw_context *brw)
cc->constant_a = ctx->Color.BlendColorUnclamped[3]; cc->constant_a = ctx->Color.BlendColorUnclamped[3];
/* Point the GPU at the new indirect state. */ /* Point the GPU at the new indirect state. */
if (intel->gen == 6) { if (brw->gen == 6) {
BEGIN_BATCH(4); BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2)); OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2));
OUT_BATCH(0); OUT_BATCH(0);


@ -35,7 +35,6 @@ static void
gen6_upload_depth_stencil_state(struct brw_context *brw) gen6_upload_depth_stencil_state(struct brw_context *brw)
{ {
struct gl_context *ctx = &brw->intel.ctx; struct gl_context *ctx = &brw->intel.ctx;
struct intel_context *intel = &brw->intel;
struct gen6_depth_stencil_state *ds; struct gen6_depth_stencil_state *ds;
struct intel_renderbuffer *depth_irb; struct intel_renderbuffer *depth_irb;
@ -88,7 +87,7 @@ gen6_upload_depth_stencil_state(struct brw_context *brw)
} }
/* Point the GPU at the new indirect state. */ /* Point the GPU at the new indirect state. */
if (intel->gen == 6) { if (brw->gen == 6) {
BEGIN_BATCH(4); BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2)); OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2));
OUT_BATCH(0); OUT_BATCH(0);


@ -105,8 +105,6 @@ void
gen6_emit_3dstate_multisample(struct brw_context *brw, gen6_emit_3dstate_multisample(struct brw_context *brw,
unsigned num_samples) unsigned num_samples)
{ {
struct intel_context *intel = &brw->intel;
uint32_t number_of_multisamples = 0; uint32_t number_of_multisamples = 0;
uint32_t sample_positions_3210 = 0; uint32_t sample_positions_3210 = 0;
uint32_t sample_positions_7654 = 0; uint32_t sample_positions_7654 = 0;
@ -130,12 +128,12 @@ gen6_emit_3dstate_multisample(struct brw_context *brw,
break; break;
} }
int len = intel->gen >= 7 ? 4 : 3; int len = brw->gen >= 7 ? 4 : 3;
BEGIN_BATCH(len); BEGIN_BATCH(len);
OUT_BATCH(_3DSTATE_MULTISAMPLE << 16 | (len - 2)); OUT_BATCH(_3DSTATE_MULTISAMPLE << 16 | (len - 2));
OUT_BATCH(MS_PIXEL_LOCATION_CENTER | number_of_multisamples); OUT_BATCH(MS_PIXEL_LOCATION_CENTER | number_of_multisamples);
OUT_BATCH(sample_positions_3210); OUT_BATCH(sample_positions_3210);
if (intel->gen >= 7) if (brw->gen >= 7)
OUT_BATCH(sample_positions_7654); OUT_BATCH(sample_positions_7654);
ADVANCE_BATCH(); ADVANCE_BATCH();
} }
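The variable packet length follows from the sample-position layout: one dword holds positions for samples 3..0, which covers Gen6's 4x MSAA, while Gen7's 8x MSAA needs a second dword for samples 7..4. In sketch form:

    /* Dwords in 3DSTATE_MULTISAMPLE, including the header dword. */
    static int
    multisample_packet_len(int gen)
    {
       return gen >= 7 ? 4 : 3;   /* Gen7+ adds sample positions 7..4 */
    }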
@ -166,8 +164,7 @@ gen6_emit_3dstate_sample_mask(struct brw_context *brw,
static void upload_multisample_state(struct brw_context *brw) static void upload_multisample_state(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel; struct gl_context *ctx = &brw->intel.ctx;
struct gl_context *ctx = &intel->ctx;
float coverage = 1.0; float coverage = 1.0;
float coverage_invert = false; float coverage_invert = false;
unsigned sample_mask = ~0u; unsigned sample_mask = ~0u;


@ -45,9 +45,8 @@
static void static void
write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx) write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
{ {
struct intel_context *intel = &brw->intel;
/* Emit workaround flushes: */ /* Emit workaround flushes: */
if (intel->gen == 6) { if (brw->gen == 6) {
/* The timestamp write below is a non-zero post-sync op, which on /* The timestamp write below is a non-zero post-sync op, which on
* Gen6 necessitates a CS stall. CS stalls need stall at scoreboard * Gen6 necessitates a CS stall. CS stalls need stall at scoreboard
* set. See the comments for intel_emit_post_sync_nonzero_flush(). * set. See the comments for intel_emit_post_sync_nonzero_flush().
@ -78,9 +77,8 @@ write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
static void static void
write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx) write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
{ {
struct intel_context *intel = &brw->intel;
/* Emit Sandybridge workaround flush: */ /* Emit Sandybridge workaround flush: */
if (intel->gen == 6) if (brw->gen == 6)
intel_emit_post_sync_nonzero_flush(brw); intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(5); BEGIN_BATCH(5);
@ -107,8 +105,7 @@ static void
write_reg(struct brw_context *brw, write_reg(struct brw_context *brw,
drm_intel_bo *query_bo, uint32_t reg, int idx) drm_intel_bo *query_bo, uint32_t reg, int idx)
{ {
struct intel_context *intel = &brw->intel; assert(brw->gen >= 6);
assert(intel->gen >= 6);
intel_batchbuffer_emit_mi_flush(brw); intel_batchbuffer_emit_mi_flush(brw);
@ -141,8 +138,7 @@ static void
write_xfb_primitives_written(struct brw_context *brw, write_xfb_primitives_written(struct brw_context *brw,
drm_intel_bo *query_bo, int idx) drm_intel_bo *query_bo, int idx)
{ {
struct intel_context *intel = &brw->intel; if (brw->gen >= 7) {
if (intel->gen >= 7) {
write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN0_IVB, idx); write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN0_IVB, idx);
} else { } else {
write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN, idx); write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN, idx);


@ -137,7 +137,6 @@ brw_begin_transform_feedback(struct gl_context *ctx, GLenum mode,
struct gl_transform_feedback_object *obj) struct gl_transform_feedback_object *obj)
{ {
struct brw_context *brw = brw_context(ctx); struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = &brw->intel;
const struct gl_shader_program *vs_prog = const struct gl_shader_program *vs_prog =
ctx->Shader.CurrentVertexProgram; ctx->Shader.CurrentVertexProgram;
const struct gl_transform_feedback_info *linked_xfb_info = const struct gl_transform_feedback_info *linked_xfb_info =
@ -145,7 +144,7 @@ brw_begin_transform_feedback(struct gl_context *ctx, GLenum mode,
struct gl_transform_feedback_object *xfb_obj = struct gl_transform_feedback_object *xfb_obj =
ctx->TransformFeedback.CurrentObject; ctx->TransformFeedback.CurrentObject;
assert(intel->gen == 6); assert(brw->gen == 6);
/* Compute the maximum number of vertices that we can write without /* Compute the maximum number of vertices that we can write without
* overflowing any of the buffers currently being used for feedback. * overflowing any of the buffers currently being used for feedback.


@ -56,10 +56,8 @@
void void
gen7_allocate_push_constants(struct brw_context *brw) gen7_allocate_push_constants(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
unsigned size = 8; unsigned size = 8;
if (brw->is_haswell && intel->gt == 3) if (brw->is_haswell && brw->gt == 3)
size = 16; size = 16;
BEGIN_BATCH(2); BEGIN_BATCH(2);
@ -76,8 +74,7 @@ gen7_allocate_push_constants(struct brw_context *brw)
static void static void
gen7_upload_urb(struct brw_context *brw) gen7_upload_urb(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel; const int push_size_kB = brw->is_haswell && brw->gt == 3 ? 32 : 16;
const int push_size_kB = brw->is_haswell && intel->gt == 3 ? 32 : 16;
/* Total space for entries is URB size - 16kB for push constants */ /* Total space for entries is URB size - 16kB for push constants */
int handle_region_size = (brw->urb.size - push_size_kB) * 1024; /* bytes */ int handle_region_size = (brw->urb.size - push_size_kB) * 1024; /* bytes */
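These two hunks encode a single rule: Haswell GT3 has twice the push constant space of other Gen7 parts, and whatever is reserved for push constants must be subtracted from the URB before sizing the handle region. A hedged restatement (helper names are ours):

    #include <stdbool.h>

    /* Total push constant space carved out of the URB, in kB, mirroring
     * the push_size_kB expression above. */
    static int
    push_constant_total_kB(bool is_haswell, int gt)
    {
       return (is_haswell && gt == 3) ? 32 : 16;
    }

    /* Bytes left over for URB entries, as in gen7_upload_urb(). */
    static int
    urb_handle_region_size(int urb_size_kB, bool is_haswell, int gt)
    {
       return (urb_size_kB - push_constant_total_kB(is_haswell, gt)) * 1024;
    }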


@ -58,10 +58,9 @@ clear_cache(struct brw_context *brw)
void void
intel_batchbuffer_init(struct brw_context *brw) intel_batchbuffer_init(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
intel_batchbuffer_reset(brw); intel_batchbuffer_reset(brw);
if (intel->gen >= 6) { if (brw->gen >= 6) {
/* We can't just use brw_state_batch to get a chunk of space for /* We can't just use brw_state_batch to get a chunk of space for
* the gen6 workaround because it involves actually writing to * the gen6 workaround because it involves actually writing to
* the buffer, and the kernel doesn't let us write to the batch. * the buffer, and the kernel doesn't let us write to the batch.
@ -176,7 +175,6 @@ do_batch_dump(struct brw_context *brw)
static int static int
do_flush_locked(struct brw_context *brw) do_flush_locked(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel;
struct intel_batchbuffer *batch = &brw->batch; struct intel_batchbuffer *batch = &brw->batch;
int ret = 0; int ret = 0;
@ -195,7 +193,7 @@ do_flush_locked(struct brw_context *brw)
if (!brw->intelScreen->no_hw) { if (!brw->intelScreen->no_hw) {
int flags; int flags;
if (intel->gen < 6 || !batch->is_blit) { if (brw->gen < 6 || !batch->is_blit) {
flags = I915_EXEC_RENDER; flags = I915_EXEC_RENDER;
} else { } else {
flags = I915_EXEC_BLT; flags = I915_EXEC_BLT;
@ -396,8 +394,7 @@ emit:
void void
intel_emit_depth_stall_flushes(struct brw_context *brw) intel_emit_depth_stall_flushes(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel; assert(brw->gen >= 6 && brw->gen <= 7);
assert(intel->gen >= 6 && intel->gen <= 7);
BEGIN_BATCH(4); BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2)); OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
@ -432,8 +429,7 @@ intel_emit_depth_stall_flushes(struct brw_context *brw)
void void
gen7_emit_vs_workaround_flush(struct brw_context *brw) gen7_emit_vs_workaround_flush(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel; assert(brw->gen == 7);
assert(intel->gen == 7);
BEGIN_BATCH(4); BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2)); OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
@ -515,8 +511,7 @@ intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
void void
intel_batchbuffer_emit_mi_flush(struct brw_context *brw) intel_batchbuffer_emit_mi_flush(struct brw_context *brw)
{ {
struct intel_context *intel = &brw->intel; if (brw->gen >= 6) {
if (intel->gen >= 6) {
if (brw->batch.is_blit) { if (brw->batch.is_blit) {
BEGIN_BATCH_BLT(4); BEGIN_BATCH_BLT(4);
OUT_BATCH(MI_FLUSH_DW); OUT_BATCH(MI_FLUSH_DW);
@ -525,7 +520,7 @@ intel_batchbuffer_emit_mi_flush(struct brw_context *brw)
OUT_BATCH(0); OUT_BATCH(0);
ADVANCE_BATCH(); ADVANCE_BATCH();
} else { } else {
if (intel->gen == 6) { if (brw->gen == 6) {
/* Hardware workaround: SNB B-Spec says: /* Hardware workaround: SNB B-Spec says:
* *
* [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache


@ -101,8 +101,7 @@ intel_batchbuffer_emit_float(struct brw_context *brw, float f)
static INLINE void static INLINE void
intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz, int is_blit) intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz, int is_blit)
{ {
struct intel_context *intel = &brw->intel; if (brw->gen >= 6 &&
if (intel->gen >= 6 &&
brw->batch.is_blit != is_blit && brw->batch.used) { brw->batch.is_blit != is_blit && brw->batch.used) {
intel_batchbuffer_flush(brw); intel_batchbuffer_flush(brw);
} }


@ -104,8 +104,7 @@ static void
set_blitter_tiling(struct brw_context *brw, set_blitter_tiling(struct brw_context *brw,
bool dst_y_tiled, bool src_y_tiled) bool dst_y_tiled, bool src_y_tiled)
{ {
struct intel_context *intel = &brw->intel; assert(brw->gen >= 6);
assert(intel->gen >= 6);
/* Idle the blitter before we update how tiling is interpreted. */ /* Idle the blitter before we update how tiling is interpreted. */
OUT_BATCH(MI_FLUSH_DW); OUT_BATCH(MI_FLUSH_DW);
@ -279,7 +278,6 @@ intelEmitCopyBlit(struct brw_context *brw,
GLshort w, GLshort h, GLshort w, GLshort h,
GLenum logic_op) GLenum logic_op)
{ {
struct intel_context *intel = &brw->intel;
GLuint CMD, BR13, pass = 0; GLuint CMD, BR13, pass = 0;
int dst_y2 = dst_y + h; int dst_y2 = dst_y + h;
int dst_x2 = dst_x + w; int dst_x2 = dst_x + w;
@ -296,7 +294,7 @@ intelEmitCopyBlit(struct brw_context *brw,
if (src_offset & 4095) if (src_offset & 4095)
return false; return false;
} }
if ((dst_y_tiled || src_y_tiled) && intel->gen < 6) if ((dst_y_tiled || src_y_tiled) && brw->gen < 6)
return false; return false;
/* do space check before going any further */ /* do space check before going any further */


@ -94,8 +94,7 @@ void
intel_resolve_for_dri2_flush(struct brw_context *brw, intel_resolve_for_dri2_flush(struct brw_context *brw,
__DRIdrawable *drawable) __DRIdrawable *drawable)
{ {
struct intel_context *intel = &brw->intel; if (brw->gen < 6) {
if (intel->gen < 6) {
/* MSAA and fast color clear are not supported, so don't waste time /* MSAA and fast color clear are not supported, so don't waste time
* checking whether a resolve is needed. * checking whether a resolve is needed.
*/ */
@ -474,30 +473,30 @@ intelInitContext(struct brw_context *brw,
driContextPriv->driverPrivate = brw; driContextPriv->driverPrivate = brw;
brw->driContext = driContextPriv; brw->driContext = driContextPriv;
intel->gen = intelScreen->gen; brw->gen = intelScreen->gen;
const int devID = intelScreen->deviceID; const int devID = intelScreen->deviceID;
if (IS_SNB_GT1(devID) || IS_IVB_GT1(devID) || IS_HSW_GT1(devID)) if (IS_SNB_GT1(devID) || IS_IVB_GT1(devID) || IS_HSW_GT1(devID))
intel->gt = 1; brw->gt = 1;
else if (IS_SNB_GT2(devID) || IS_IVB_GT2(devID) || IS_HSW_GT2(devID)) else if (IS_SNB_GT2(devID) || IS_IVB_GT2(devID) || IS_HSW_GT2(devID))
intel->gt = 2; brw->gt = 2;
else if (IS_HSW_GT3(devID)) else if (IS_HSW_GT3(devID))
intel->gt = 3; brw->gt = 3;
else else
intel->gt = 0; brw->gt = 0;
if (IS_HASWELL(devID)) { if (IS_HASWELL(devID)) {
brw->is_haswell = true; brw->is_haswell = true;
} else if (IS_BAYTRAIL(devID)) { } else if (IS_BAYTRAIL(devID)) {
brw->is_baytrail = true; brw->is_baytrail = true;
intel->gt = 1; brw->gt = 1;
} else if (IS_G4X(devID)) { } else if (IS_G4X(devID)) {
brw->is_g4x = true; brw->is_g4x = true;
} }
brw->has_separate_stencil = brw->intelScreen->hw_has_separate_stencil; brw->has_separate_stencil = brw->intelScreen->hw_has_separate_stencil;
brw->must_use_separate_stencil = brw->intelScreen->hw_must_use_separate_stencil; brw->must_use_separate_stencil = brw->intelScreen->hw_must_use_separate_stencil;
brw->has_hiz = intel->gen >= 6; brw->has_hiz = brw->gen >= 6;
brw->has_llc = brw->intelScreen->hw_has_llc; brw->has_llc = brw->intelScreen->hw_has_llc;
brw->has_swizzling = brw->intelScreen->hw_has_swizzling; brw->has_swizzling = brw->intelScreen->hw_has_swizzling;
@ -560,7 +559,7 @@ intelInitContext(struct brw_context *brw,
INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control); INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
if (INTEL_DEBUG & DEBUG_BUFMGR) if (INTEL_DEBUG & DEBUG_BUFMGR)
dri_bufmgr_set_debug(brw->bufmgr, true); dri_bufmgr_set_debug(brw->bufmgr, true);
if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && intel->gen < 7) { if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && brw->gen < 7) {
fprintf(stderr, fprintf(stderr,
"shader_time debugging requires gen7 (Ivybridge) or better.\n"); "shader_time debugging requires gen7 (Ivybridge) or better.\n");
INTEL_DEBUG &= ~DEBUG_SHADER_TIME; INTEL_DEBUG &= ~DEBUG_SHADER_TIME;
@ -578,7 +577,7 @@ intelInitContext(struct brw_context *brw,
if (!driQueryOptionb(&brw->optionCache, "hiz")) { if (!driQueryOptionb(&brw->optionCache, "hiz")) {
brw->has_hiz = false; brw->has_hiz = false;
/* On gen6, you can only do separate stencil with HIZ. */ /* On gen6, you can only do separate stencil with HIZ. */
if (intel->gen == 6) if (brw->gen == 6)
brw->has_separate_stencil = false; brw->has_separate_stencil = false;
} }


@ -112,12 +112,6 @@ struct intel_batchbuffer {
struct intel_context struct intel_context
{ {
struct gl_context ctx; /**< base class, must be first field */ struct gl_context ctx; /**< base class, must be first field */
/**
* Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965.
*/
int gen;
int gt;
}; };
/** /**


@ -40,9 +40,8 @@ void
intelInitExtensions(struct gl_context *ctx) intelInitExtensions(struct gl_context *ctx)
{ {
struct brw_context *brw = brw_context(ctx); struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
assert(intel->gen >= 4); assert(brw->gen >= 4);
ctx->Extensions.ARB_depth_buffer_float = true; ctx->Extensions.ARB_depth_buffer_float = true;
ctx->Extensions.ARB_depth_clamp = true; ctx->Extensions.ARB_depth_clamp = true;
@ -124,13 +123,13 @@ intelInitExtensions(struct gl_context *ctx)
ctx->Extensions.OES_draw_texture = true; ctx->Extensions.OES_draw_texture = true;
ctx->Extensions.OES_standard_derivatives = true; ctx->Extensions.OES_standard_derivatives = true;
if (intel->gen >= 6) if (brw->gen >= 6)
ctx->Const.GLSLVersion = 140; ctx->Const.GLSLVersion = 140;
else else
ctx->Const.GLSLVersion = 120; ctx->Const.GLSLVersion = 120;
_mesa_override_glsl_version(ctx); _mesa_override_glsl_version(ctx);
if (intel->gen >= 6) { if (brw->gen >= 6) {
uint64_t dummy; uint64_t dummy;
ctx->Extensions.EXT_framebuffer_multisample = true; ctx->Extensions.EXT_framebuffer_multisample = true;
@ -152,7 +151,7 @@ intelInitExtensions(struct gl_context *ctx)
ctx->Extensions.ARB_timer_query = true; ctx->Extensions.ARB_timer_query = true;
} }
if (intel->gen >= 5) { if (brw->gen >= 5) {
ctx->Extensions.ARB_texture_query_lod = true; ctx->Extensions.ARB_texture_query_lod = true;
ctx->Extensions.EXT_timer_query = true; ctx->Extensions.EXT_timer_query = true;
} }


@ -550,7 +550,6 @@ static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb) intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{ {
struct brw_context *brw = brw_context(ctx); struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_renderbuffer *depthRb = struct intel_renderbuffer *depthRb =
intel_get_renderbuffer(fb, BUFFER_DEPTH); intel_get_renderbuffer(fb, BUFFER_DEPTH);
struct intel_renderbuffer *stencilRb = struct intel_renderbuffer *stencilRb =
@ -596,7 +595,7 @@ intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
"instead of S8\n", "instead of S8\n",
_mesa_get_format_name(stencil_mt->format)); _mesa_get_format_name(stencil_mt->format));
} }
if (intel->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) { if (brw->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
/* Before Gen7, separate depth and stencil buffers can be used /* Before Gen7, separate depth and stencil buffers can be used
* only if HiZ is enabled. From the Sandybridge PRM, Volume 2, * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
* Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable: * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:


@ -71,9 +71,8 @@ target_to_target(GLenum target)
static enum intel_msaa_layout static enum intel_msaa_layout
compute_msaa_layout(struct brw_context *brw, gl_format format, GLenum target) compute_msaa_layout(struct brw_context *brw, gl_format format, GLenum target)
{ {
struct intel_context *intel = &brw->intel;
/* Prior to Gen7, all MSAA surfaces used IMS layout. */ /* Prior to Gen7, all MSAA surfaces used IMS layout. */
if (intel->gen < 7) if (brw->gen < 7)
return INTEL_MSAA_LAYOUT_IMS; return INTEL_MSAA_LAYOUT_IMS;
/* In Gen7, IMS layout is only used for depth and stencil buffers. */ /* In Gen7, IMS layout is only used for depth and stencil buffers. */
@ -96,7 +95,7 @@ compute_msaa_layout(struct brw_context *brw, gl_format format, GLenum target)
*/ */
if (_mesa_get_format_datatype(format) == GL_INT) { if (_mesa_get_format_datatype(format) == GL_INT) {
/* TODO: is this workaround needed for future chipsets? */ /* TODO: is this workaround needed for future chipsets? */
assert(intel->gen == 7); assert(brw->gen == 7);
return INTEL_MSAA_LAYOUT_UMS; return INTEL_MSAA_LAYOUT_UMS;
} else { } else {
/* For now, if we're going to be texturing from this surface, /* For now, if we're going to be texturing from this surface,
@ -201,10 +200,8 @@ bool
intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw, intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw,
struct intel_mipmap_tree *mt) struct intel_mipmap_tree *mt)
{ {
struct intel_context *intel = &brw->intel;
/* MCS support does not exist prior to Gen7 */ /* MCS support does not exist prior to Gen7 */
if (intel->gen < 7) if (brw->gen < 7)
return false; return false;
/* MCS is only supported for color buffers */ /* MCS is only supported for color buffers */
@ -415,7 +412,6 @@ intel_miptree_choose_tiling(struct brw_context *brw,
enum intel_miptree_tiling_mode requested, enum intel_miptree_tiling_mode requested,
struct intel_mipmap_tree *mt) struct intel_mipmap_tree *mt)
{ {
struct intel_context *intel = &brw->intel;
if (format == MESA_FORMAT_S8) { if (format == MESA_FORMAT_S8) {
/* The stencil buffer is W tiled. However, we request from the kernel a /* The stencil buffer is W tiled. However, we request from the kernel a
* non-tiled buffer because the GTT is incapable of W fencing. * non-tiled buffer because the GTT is incapable of W fencing.
@ -469,7 +465,7 @@ intel_miptree_choose_tiling(struct brw_context *brw,
} }
/* Pre-gen6 doesn't have BLORP to handle Y-tiling, so use X-tiling. */ /* Pre-gen6 doesn't have BLORP to handle Y-tiling, so use X-tiling. */
if (intel->gen < 6) if (brw->gen < 6)
return I915_TILING_X; return I915_TILING_X;
return I915_TILING_Y | I915_TILING_X; return I915_TILING_Y | I915_TILING_X;
@ -1131,8 +1127,7 @@ intel_miptree_alloc_mcs(struct brw_context *brw,
struct intel_mipmap_tree *mt, struct intel_mipmap_tree *mt,
GLuint num_samples) GLuint num_samples)
{ {
struct intel_context *intel = &brw->intel; assert(brw->gen >= 7); /* MCS only used on Gen7+ */
assert(intel->gen >= 7); /* MCS only used on Gen7+ */
assert(mt->mcs_mt == NULL); assert(mt->mcs_mt == NULL);
/* Choose the correct format for the MCS buffer. All that really matters /* Choose the correct format for the MCS buffer. All that really matters
@ -2104,7 +2099,6 @@ intel_miptree_map_singlesample(struct brw_context *brw,
void **out_ptr, void **out_ptr,
int *out_stride) int *out_stride)
{ {
struct intel_context *intel = &brw->intel;
struct intel_miptree_map *map; struct intel_miptree_map *map;
assert(mt->num_samples <= 1); assert(mt->num_samples <= 1);
@ -2134,7 +2128,7 @@ intel_miptree_map_singlesample(struct brw_context *brw,
!(mode & GL_MAP_WRITE_BIT) && !(mode & GL_MAP_WRITE_BIT) &&
!mt->compressed && !mt->compressed &&
(mt->region->tiling == I915_TILING_X || (mt->region->tiling == I915_TILING_X ||
(intel->gen >= 6 && mt->region->tiling == I915_TILING_Y)) && (brw->gen >= 6 && mt->region->tiling == I915_TILING_Y)) &&
mt->region->pitch < 32768) { mt->region->pitch < 32768) {
intel_miptree_map_blit(brw, mt, map, level, slice); intel_miptree_map_blit(brw, mt, map, level, slice);
} else if (mt->region->tiling != I915_TILING_NONE && } else if (mt->region->tiling != I915_TILING_NONE &&


@ -52,7 +52,6 @@ intel_blit_texsubimage(struct gl_context * ctx,
const struct gl_pixelstore_attrib *packing) const struct gl_pixelstore_attrib *packing)
{ {
struct brw_context *brw = brw_context(ctx); struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_texture_image *intelImage = intel_texture_image(texImage); struct intel_texture_image *intelImage = intel_texture_image(texImage);
/* Try to do a blit upload of the subimage if the texture is /* Try to do a blit upload of the subimage if the texture is
@ -71,7 +70,7 @@ intel_blit_texsubimage(struct gl_context * ctx,
/* On gen6, it's probably not worth swapping to the blit ring to do /* On gen6, it's probably not worth swapping to the blit ring to do
* this because of all the overhead involved. * this because of all the overhead involved.
*/ */
if (intel->gen >= 6) if (brw->gen >= 6)
return false; return false;
if (!drm_intel_bo_busy(intelImage->mt->region->bo)) if (!drm_intel_bo_busy(intelImage->mt->region->bo))


@ -52,7 +52,7 @@ test_compact_instruction(struct brw_compile *p, struct brw_instruction src)
if (memcmp(&unchanged, &dst, sizeof(dst))) { if (memcmp(&unchanged, &dst, sizeof(dst))) {
fprintf(stderr, "Failed to compact, but dst changed\n"); fprintf(stderr, "Failed to compact, but dst changed\n");
fprintf(stderr, " Instruction: "); fprintf(stderr, " Instruction: ");
brw_disasm(stderr, &src, intel->gen); brw_disasm(stderr, &src, brw->gen);
return false; return false;
} }
} }
@ -297,10 +297,10 @@ main(int argc, char **argv)
{ {
struct brw_context *brw = calloc(1, sizeof(*brw)); struct brw_context *brw = calloc(1, sizeof(*brw));
struct intel_context *intel = &brw->intel; struct intel_context *intel = &brw->intel;
intel->gen = 6; brw->gen = 6;
bool fail = false; bool fail = false;
for (intel->gen = 6; intel->gen <= 7; intel->gen++) { for (brw->gen = 6; brw->gen <= 7; brw->gen++) {
fail |= run_tests(brw); fail |= run_tests(brw);
} }


@ -107,7 +107,7 @@ void register_coalesce_test::SetUp()
_mesa_init_vertex_program(ctx, &vp->program, GL_VERTEX_SHADER, 0); _mesa_init_vertex_program(ctx, &vp->program, GL_VERTEX_SHADER, 0);
intel->gen = 4; brw->gen = 4;
} }
static void static void