i965: Use brw_stage_state for WM data as well.

This gets the VS, GS, and PS all using the same data structure.

Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Paul Berry <stereotype441@gmail.com>
This commit is contained in:
Kenneth Graunke 2013-09-01 17:31:54 -07:00
parent e6e5f88848
commit 4b3c0a797f
13 changed files with 71 additions and 93 deletions

View File

@@ -1237,43 +1237,17 @@ struct brw_context
} sf;
struct {
struct brw_stage_state base;
struct brw_wm_prog_data *prog_data;
GLuint render_surf;
drm_intel_bo *scratch_bo;
/**
* Buffer object used in place of multisampled null render targets on
* Gen6. See brw_update_null_renderbuffer_surface().
*/
drm_intel_bo *multisampled_null_render_target_bo;
/** Offset in the program cache to the WM program */
uint32_t prog_offset;
uint32_t state_offset; /* offset in batchbuffer to pre-gen6 WM state */
drm_intel_bo *const_bo; /* pull constant buffer. */
/**
* This is offset in the batch to the push constants on gen6.
*
* Pre-gen6, push constants live in the CURBE.
*/
uint32_t push_const_offset;
/** Binding table of pointers to surf_bo entries */
uint32_t bind_bo_offset;
uint32_t surf_offset[BRW_MAX_WM_SURFACES];
/** SAMPLER_STATE count and table offset */
uint32_t sampler_count;
uint32_t sampler_offset;
/** Offsets in the batch to sampler default colors (texture border color)
*/
uint32_t sdc_offset[BRW_MAX_TEX_UNIT];
struct {
struct ra_regs *regs;

View File

@@ -333,7 +333,8 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
* won't work since ARB programs use the texture unit number as the sampler
* index.
*/
brw->wm.sampler_count = _mesa_fls(ctx->FragmentProgram._Current->Base.SamplersUsed);
brw->wm.base.sampler_count =
_mesa_fls(ctx->FragmentProgram._Current->Base.SamplersUsed);
brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
_mesa_fls(ctx->GeometryProgram._Current->Base.SamplersUsed) : 0;
brw->vs.base.sampler_count =

View File

@@ -3186,12 +3186,12 @@ brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
key.program_string_id = bfp->id;
uint32_t old_prog_offset = brw->wm.prog_offset;
uint32_t old_prog_offset = brw->wm.base.prog_offset;
struct brw_wm_prog_data *old_prog_data = brw->wm.prog_data;
bool success = do_wm_prog(brw, prog, bfp, &key);
brw->wm.prog_offset = old_prog_offset;
brw->wm.base.prog_offset = old_prog_offset;
brw->wm.prog_data = old_prog_data;
return success;

View File

@@ -81,7 +81,7 @@ static void upload_binding_table_pointers(struct brw_context *brw)
OUT_BATCH(0); /* gs */
OUT_BATCH(0); /* clip */
OUT_BATCH(0); /* sf */
OUT_BATCH(brw->wm.bind_bo_offset);
OUT_BATCH(brw->wm.base.bind_bo_offset);
ADVANCE_BATCH();
}
@@ -115,7 +115,7 @@ static void upload_gen6_binding_table_pointers(struct brw_context *brw)
(4 - 2));
OUT_BATCH(brw->vs.base.bind_bo_offset); /* vs */
OUT_BATCH(brw->ff_gs.bind_bo_offset); /* gs */
OUT_BATCH(brw->wm.bind_bo_offset); /* wm/ps */
OUT_BATCH(brw->wm.base.bind_bo_offset); /* wm/ps */
ADVANCE_BATCH();
}
@@ -161,7 +161,7 @@ static void upload_pipelined_state_pointers(struct brw_context *brw )
OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
brw->sf.state_offset);
OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
brw->wm.state_offset);
brw->wm.base.state_offset);
OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
brw->cc.state_offset);
ADVANCE_BATCH();

View File

@@ -82,7 +82,7 @@ brw_destroy_context(struct brw_context *brw)
dri_bo_release(&brw->curbe.curbe_bo);
dri_bo_release(&brw->vs.base.const_bo);
dri_bo_release(&brw->wm.const_bo);
dri_bo_release(&brw->wm.base.const_bo);
free(brw->curbe.last_buf);
free(brw->curbe.next_buf);

View File

@@ -183,7 +183,7 @@ bool do_wm_prog(struct brw_context *brw,
c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch);
brw_get_scratch_bo(brw, &brw->wm.scratch_bo,
brw_get_scratch_bo(brw, &brw->wm.base.scratch_bo,
c->prog_data.total_scratch * brw->max_wm_threads);
}
@@ -194,7 +194,7 @@ bool do_wm_prog(struct brw_context *brw,
&c->key, sizeof(c->key),
program, program_size,
&c->prog_data, sizeof(c->prog_data),
&brw->wm.prog_offset, &brw->wm.prog_data);
&brw->wm.base.prog_offset, &brw->wm.prog_data);
ralloc_free(c);
@@ -426,7 +426,7 @@ static void brw_wm_populate_key( struct brw_context *brw,
key->clamp_fragment_color = ctx->Color._ClampFragmentColor;
/* _NEW_TEXTURE */
brw_populate_sampler_prog_key_data(ctx, prog, brw->wm.sampler_count,
brw_populate_sampler_prog_key_data(ctx, prog, brw->wm.base.sampler_count,
&key->tex);
/* _NEW_BUFFERS */
@@ -486,7 +486,7 @@ brw_upload_wm_prog(struct brw_context *brw)
if (!brw_search_cache(&brw->cache, BRW_WM_PROG,
&key, sizeof(key),
&brw->wm.prog_offset, &brw->wm.prog_data)) {
&brw->wm.base.prog_offset, &brw->wm.prog_data)) {
bool success = do_wm_prog(brw, ctx->Shader._CurrentFragmentProgram, fp,
&key);
(void) success;

View File

@@ -403,9 +403,9 @@ brw_upload_fs_samplers(struct brw_context *brw)
/* BRW_NEW_FRAGMENT_PROGRAM */
struct gl_program *fs = (struct gl_program *) brw->fragment_program;
brw->vtbl.upload_sampler_state_table(brw, fs,
brw->wm.sampler_count,
&brw->wm.sampler_offset,
brw->wm.sdc_offset);
brw->wm.base.sampler_count,
&brw->wm.base.sampler_offset,
brw->wm.base.sdc_offset);
}
const struct brw_tracked_state brw_fs_samplers = {

View File

@@ -78,7 +78,7 @@ brw_upload_wm_unit(struct brw_context *brw)
struct brw_wm_unit_state *wm;
wm = brw_state_batch(brw, AUB_TRACE_WM_STATE,
sizeof(*wm), 32, &brw->wm.state_offset);
sizeof(*wm), 32, &brw->wm.base.state_offset);
memset(wm, 0, sizeof(*wm));
if (brw->wm.prog_data->prog_offset_16) {
@@ -96,16 +96,16 @@ brw_upload_wm_unit(struct brw_context *brw)
wm->thread0.kernel_start_pointer =
brw_program_reloc(brw,
brw->wm.state_offset +
brw->wm.base.state_offset +
offsetof(struct brw_wm_unit_state, thread0),
brw->wm.prog_offset +
brw->wm.base.prog_offset +
(wm->thread0.grf_reg_count << 1)) >> 6;
wm->wm9.kernel_start_pointer_2 =
brw_program_reloc(brw,
brw->wm.state_offset +
brw->wm.base.state_offset +
offsetof(struct brw_wm_unit_state, wm9),
brw->wm.prog_offset +
brw->wm.base.prog_offset +
brw->wm.prog_data->prog_offset_16 +
(wm->wm9.grf_reg_count_2 << 1)) >> 6;
@@ -124,7 +124,7 @@ brw_upload_wm_unit(struct brw_context *brw)
if (brw->wm.prog_data->total_scratch != 0) {
wm->thread2.scratch_space_base_pointer =
brw->wm.scratch_bo->offset >> 10; /* reloc */
brw->wm.base.scratch_bo->offset >> 10; /* reloc */
wm->thread2.per_thread_scratch_space =
ffs(brw->wm.prog_data->total_scratch) - 11;
} else {
@@ -144,13 +144,13 @@ brw_upload_wm_unit(struct brw_context *brw)
wm->wm4.sampler_count = 0; /* hardware requirement */
else {
/* CACHE_NEW_SAMPLER */
wm->wm4.sampler_count = (brw->wm.sampler_count + 1) / 4;
wm->wm4.sampler_count = (brw->wm.base.sampler_count + 1) / 4;
}
if (brw->wm.sampler_count) {
if (brw->wm.base.sampler_count) {
/* reloc */
wm->wm4.sampler_state_pointer = (brw->batch.bo->offset +
brw->wm.sampler_offset) >> 5;
brw->wm.base.sampler_offset) >> 5;
} else {
wm->wm4.sampler_state_pointer = 0;
}
@@ -217,19 +217,19 @@ brw_upload_wm_unit(struct brw_context *brw)
/* Emit scratch space relocation */
if (brw->wm.prog_data->total_scratch != 0) {
drm_intel_bo_emit_reloc(brw->batch.bo,
brw->wm.state_offset +
brw->wm.base.state_offset +
offsetof(struct brw_wm_unit_state, thread2),
brw->wm.scratch_bo,
brw->wm.base.scratch_bo,
wm->thread2.per_thread_scratch_space,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}
/* Emit sampler state relocation */
if (brw->wm.sampler_count != 0) {
if (brw->wm.base.sampler_count != 0) {
drm_intel_bo_emit_reloc(brw->batch.bo,
brw->wm.state_offset +
brw->wm.base.state_offset +
offsetof(struct brw_wm_unit_state, wm4),
brw->batch.bo, (brw->wm.sampler_offset |
brw->batch.bo, (brw->wm.base.sampler_offset |
wm->wm4.stats_enable |
(wm->wm4.sampler_count << 2)),
I915_GEM_DOMAIN_INSTRUCTION, 0);

View File

@@ -453,29 +453,29 @@ brw_upload_wm_pull_constants(struct brw_context *brw)
/* CACHE_NEW_WM_PROG */
if (brw->wm.prog_data->nr_pull_params == 0) {
if (brw->wm.const_bo) {
drm_intel_bo_unreference(brw->wm.const_bo);
brw->wm.const_bo = NULL;
brw->wm.surf_offset[surf_index] = 0;
if (brw->wm.base.const_bo) {
drm_intel_bo_unreference(brw->wm.base.const_bo);
brw->wm.base.const_bo = NULL;
brw->wm.base.surf_offset[surf_index] = 0;
brw->state.dirty.brw |= BRW_NEW_SURFACES;
}
return;
}
drm_intel_bo_unreference(brw->wm.const_bo);
brw->wm.const_bo = drm_intel_bo_alloc(brw->bufmgr, "WM const bo",
drm_intel_bo_unreference(brw->wm.base.const_bo);
brw->wm.base.const_bo = drm_intel_bo_alloc(brw->bufmgr, "WM const bo",
size, 64);
/* _NEW_PROGRAM_CONSTANTS */
drm_intel_gem_bo_map_gtt(brw->wm.const_bo);
constants = brw->wm.const_bo->virtual;
drm_intel_gem_bo_map_gtt(brw->wm.base.const_bo);
constants = brw->wm.base.const_bo->virtual;
for (i = 0; i < brw->wm.prog_data->nr_pull_params; i++) {
constants[i] = *brw->wm.prog_data->pull_param[i];
}
drm_intel_gem_bo_unmap_gtt(brw->wm.const_bo);
drm_intel_gem_bo_unmap_gtt(brw->wm.base.const_bo);
brw->vtbl.create_constant_surface(brw, brw->wm.const_bo, 0, size,
&brw->wm.surf_offset[surf_index],
brw->vtbl.create_constant_surface(brw, brw->wm.base.const_bo, 0, size,
&brw->wm.base.surf_offset[surf_index],
true);
brw->state.dirty.brw |= BRW_NEW_SURFACES;
@@ -522,7 +522,7 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
const struct gl_framebuffer *fb = ctx->DrawBuffer;
surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
&brw->wm.surf_offset[SURF_INDEX_DRAW(unit)]);
&brw->wm.base.surf_offset[SURF_INDEX_DRAW(unit)]);
if (fb->Visual.samples > 1) {
/* On Gen6, null render targets seem to cause GPU hangs when
@@ -575,7 +575,7 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
if (bo) {
drm_intel_bo_emit_reloc(brw->batch.bo,
brw->wm.surf_offset[SURF_INDEX_DRAW(unit)] + 4,
brw->wm.base.surf_offset[SURF_INDEX_DRAW(unit)] + 4,
bo, 0,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}
@@ -624,7 +624,7 @@ brw_update_renderbuffer_surface(struct brw_context *brw,
region = irb->mt->region;
surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
&brw->wm.surf_offset[SURF_INDEX_DRAW(unit)]);
&brw->wm.base.surf_offset[SURF_INDEX_DRAW(unit)]);
format = brw->render_target_format[rb_format];
if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
@@ -680,7 +680,7 @@ brw_update_renderbuffer_surface(struct brw_context *brw,
}
drm_intel_bo_emit_reloc(brw->batch.bo,
brw->wm.surf_offset[SURF_INDEX_DRAW(unit)] + 4,
brw->wm.base.surf_offset[SURF_INDEX_DRAW(unit)] + 4,
region->bo,
surf[1] - region->bo->offset,
I915_GEM_DOMAIN_RENDER,
@@ -783,7 +783,7 @@ brw_update_texture_surfaces(struct brw_context *brw)
brw->gs.base.surf_offset +
SURF_INDEX_VEC4_TEXTURE(0));
update_stage_texture_surfaces(brw, fs,
brw->wm.surf_offset +
brw->wm.base.surf_offset +
SURF_INDEX_TEXTURE(0));
brw->state.dirty.brw |= BRW_NEW_SURFACES;
@@ -844,7 +844,7 @@ brw_upload_wm_ubo_surfaces(struct brw_context *brw)
return;
brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
&brw->wm.surf_offset[SURF_INDEX_WM_UBO(0)]);
&brw->wm.base.surf_offset[SURF_INDEX_WM_UBO(0)]);
}
const struct brw_tracked_state brw_wm_ubo_surfaces = {
@@ -867,18 +867,18 @@ brw_upload_wm_binding_table(struct brw_context *brw)
int i;
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
gen7_create_shader_time_surface(brw, &brw->wm.surf_offset[SURF_INDEX_WM_SHADER_TIME]);
gen7_create_shader_time_surface(brw, &brw->wm.base.surf_offset[SURF_INDEX_WM_SHADER_TIME]);
}
/* CACHE_NEW_WM_PROG */
unsigned entries = brw->wm.prog_data->binding_table_size;
bind = brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
sizeof(uint32_t) * entries,
32, &brw->wm.bind_bo_offset);
32, &brw->wm.base.bind_bo_offset);
/* BRW_NEW_SURFACES */
for (i = 0; i < entries; i++) {
bind[i] = brw->wm.surf_offset[i];
bind[i] = brw->wm.base.surf_offset[i];
}
brw->state.dirty.brw |= BRW_NEW_PS_BINDING_TABLE;

View File

@@ -41,7 +41,7 @@ upload_sampler_state_pointers(struct brw_context *brw)
(4 - 2));
OUT_BATCH(brw->vs.base.sampler_offset); /* VS */
OUT_BATCH(0); /* GS */
OUT_BATCH(brw->wm.sampler_offset);
OUT_BATCH(brw->wm.base.sampler_offset);
ADVANCE_BATCH();
}

View File

@@ -56,7 +56,7 @@ gen6_upload_wm_push_constants(struct brw_context *brw)
constants = brw_state_batch(brw, AUB_TRACE_WM_CONSTANTS,
brw->wm.prog_data->nr_params *
sizeof(float),
32, &brw->wm.push_const_offset);
32, &brw->wm.base.push_const_offset);
for (i = 0; i < brw->wm.prog_data->nr_params; i++) {
constants[i] = *brw->wm.prog_data->param[i];
@@ -117,7 +117,7 @@ upload_wm_state(struct brw_context *brw)
/* Pointer to the WM constant buffer. Covered by the set of
* state flags from gen6_upload_wm_push_constants.
*/
OUT_BATCH(brw->wm.push_const_offset +
OUT_BATCH(brw->wm.base.push_const_offset +
ALIGN(brw->wm.prog_data->nr_params,
brw->wm.prog_data->dispatch_width) / 8 - 1);
OUT_BATCH(0);
@@ -140,7 +140,8 @@ upload_wm_state(struct brw_context *brw)
dw2 |= GEN6_WM_FLOATING_POINT_MODE_ALT;
/* CACHE_NEW_SAMPLER */
dw2 |= (ALIGN(brw->wm.sampler_count, 4) / 4) << GEN6_WM_SAMPLER_COUNT_SHIFT;
dw2 |= (ALIGN(brw->wm.base.sampler_count, 4) / 4) <<
GEN6_WM_SAMPLER_COUNT_SHIFT;
dw4 |= (brw->wm.prog_data->first_curbe_grf <<
GEN6_WM_DISPATCH_START_GRF_SHIFT_0);
dw4 |= (brw->wm.prog_data->first_curbe_grf_16 <<
@@ -203,10 +204,11 @@ upload_wm_state(struct brw_context *brw)
BEGIN_BATCH(9);
OUT_BATCH(_3DSTATE_WM << 16 | (9 - 2));
OUT_BATCH(brw->wm.prog_offset);
OUT_BATCH(brw->wm.base.prog_offset);
OUT_BATCH(dw2);
if (brw->wm.prog_data->total_scratch) {
OUT_RELOC(brw->wm.scratch_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
OUT_RELOC(brw->wm.base.scratch_bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
ffs(brw->wm.prog_data->total_scratch) - 11);
} else {
OUT_BATCH(0);
@@ -216,7 +218,7 @@ upload_wm_state(struct brw_context *brw)
OUT_BATCH(dw6);
OUT_BATCH(0); /* kernel 1 pointer */
/* kernel 2 pointer */
OUT_BATCH(brw->wm.prog_offset + brw->wm.prog_data->prog_offset_16);
OUT_BATCH(brw->wm.base.prog_offset + brw->wm.prog_data->prog_offset_16);
ADVANCE_BATCH();
}

View File

@@ -119,13 +119,13 @@ upload_ps_state(struct brw_context *brw)
/* BRW_NEW_PS_BINDING_TABLE */
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS_PS << 16 | (2 - 2));
OUT_BATCH(brw->wm.bind_bo_offset);
OUT_BATCH(brw->wm.base.bind_bo_offset);
ADVANCE_BATCH();
/* CACHE_NEW_SAMPLER */
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS_PS << 16 | (2 - 2));
OUT_BATCH(brw->wm.sampler_offset);
OUT_BATCH(brw->wm.base.sampler_offset);
ADVANCE_BATCH();
/* CACHE_NEW_WM_PROG */
@@ -150,7 +150,7 @@ upload_ps_state(struct brw_context *brw)
/* Pointer to the WM constant buffer. Covered by the set of
* state flags from gen6_upload_wm_push_constants.
*/
OUT_BATCH(brw->wm.push_const_offset | GEN7_MOCS_L3);
OUT_BATCH(brw->wm.base.push_const_offset | GEN7_MOCS_L3);
OUT_BATCH(0);
OUT_BATCH(0);
OUT_BATCH(0);
@@ -160,7 +160,8 @@ upload_ps_state(struct brw_context *brw)
dw2 = dw4 = dw5 = 0;
/* CACHE_NEW_SAMPLER */
dw2 |= (ALIGN(brw->wm.sampler_count, 4) / 4) << GEN7_PS_SAMPLER_COUNT_SHIFT;
dw2 |=
(ALIGN(brw->wm.base.sampler_count, 4) / 4) << GEN7_PS_SAMPLER_COUNT_SHIFT;
/* Use ALT floating point mode for ARB fragment programs, because they
* require 0^0 == 1. Even though _CurrentFragmentProgram is used for
@@ -205,10 +206,10 @@ upload_ps_state(struct brw_context *brw)
BEGIN_BATCH(8);
OUT_BATCH(_3DSTATE_PS << 16 | (8 - 2));
OUT_BATCH(brw->wm.prog_offset);
OUT_BATCH(brw->wm.base.prog_offset);
OUT_BATCH(dw2);
if (brw->wm.prog_data->total_scratch) {
OUT_RELOC(brw->wm.scratch_bo,
OUT_RELOC(brw->wm.base.scratch_bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
ffs(brw->wm.prog_data->total_scratch) - 11);
} else {
@@ -217,7 +218,7 @@ upload_ps_state(struct brw_context *brw)
OUT_BATCH(dw4);
OUT_BATCH(dw5);
OUT_BATCH(0); /* kernel 1 pointer */
OUT_BATCH(brw->wm.prog_offset + brw->wm.prog_data->prog_offset_16);
OUT_BATCH(brw->wm.base.prog_offset + brw->wm.prog_data->prog_offset_16);
ADVANCE_BATCH();
}

View File

@@ -465,7 +465,7 @@ gen7_update_null_renderbuffer_surface(struct brw_context *brw, unsigned unit)
const struct gl_framebuffer *fb = ctx->DrawBuffer;
uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
&brw->wm.surf_offset[SURF_INDEX_DRAW(unit)]);
&brw->wm.base.surf_offset[SURF_INDEX_DRAW(unit)]);
memset(surf, 0, 8 * 4);
/* From the Ivybridge PRM, Volume 4, Part 1, page 65,
@@ -510,7 +510,7 @@ gen7_update_renderbuffer_surface(struct brw_context *brw,
uint32_t surf_index = SURF_INDEX_DRAW(unit);
uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
&brw->wm.surf_offset[surf_index]);
&brw->wm.base.surf_offset[surf_index]);
memset(surf, 0, 8 * 4);
intel_miptree_used_for_rendering(irb->mt);
@@ -579,7 +579,7 @@ gen7_update_renderbuffer_surface(struct brw_context *brw,
(depth - 1) << GEN7_SURFACE_RENDER_TARGET_VIEW_EXTENT_SHIFT;
if (irb->mt->mcs_mt) {
gen7_set_surface_mcs_info(brw, surf, brw->wm.surf_offset[surf_index],
gen7_set_surface_mcs_info(brw, surf, brw->wm.base.surf_offset[surf_index],
irb->mt->mcs_mt, true /* is RT */);
}
@@ -593,7 +593,7 @@ gen7_update_renderbuffer_surface(struct brw_context *brw,
}
drm_intel_bo_emit_reloc(brw->batch.bo,
brw->wm.surf_offset[surf_index] + 4,
brw->wm.base.surf_offset[surf_index] + 4,
region->bo,
surf[1] - region->bo->offset,
I915_GEM_DOMAIN_RENDER,