vc4: Don't abort when a shader compile fails.
It's much better to just skip the draw call entirely. Getting this information out of register allocation will also be useful for implementing threaded fragment shaders, which will need to retry non-threaded if RA fails.

Cc: <mesa-stable@lists.freedesktop.org>
This commit is contained in:
parent
aaee3daa90
commit
4d019bd703
|
@@ -157,6 +157,12 @@ struct vc4_compiled_shader {
|
|||
|
||||
bool disable_early_z;
|
||||
|
||||
/* Set if the compile failed, likely due to register allocation
|
||||
* failure. In this case, we have no shader to run and should not try
|
||||
* to do any draws.
|
||||
*/
|
||||
bool failed;
|
||||
|
||||
uint8_t num_inputs;
|
||||
|
||||
/* Byte offsets for the start of the vertex attributes 0-7, and the
|
||||
|
@@ -462,7 +468,7 @@ void vc4_flush_jobs_reading_resource(struct vc4_context *vc4,
|
|||
void vc4_emit_state(struct pipe_context *pctx);
|
||||
void vc4_generate_code(struct vc4_context *vc4, struct vc4_compile *c);
|
||||
struct qpu_reg *vc4_register_allocate(struct vc4_context *vc4, struct vc4_compile *c);
|
||||
void vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode);
|
||||
bool vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode);
|
||||
|
||||
bool vc4_rt_format_supported(enum pipe_format f);
|
||||
bool vc4_rt_format_is_565(enum pipe_format f);
|
||||
|
|
|
@@ -307,7 +307,10 @@ vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
|
|||
}
|
||||
|
||||
vc4_start_draw(vc4);
|
||||
vc4_update_compiled_shaders(vc4, info->mode);
|
||||
if (!vc4_update_compiled_shaders(vc4, info->mode)) {
|
||||
debug_warn_once("shader compile failed, skipping draw call.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
vc4_emit_state(pctx);
|
||||
|
||||
|
|
|
@@ -2488,9 +2488,15 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
|
|||
}
|
||||
}
|
||||
|
||||
copy_uniform_state_to_shader(shader, c);
|
||||
shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
|
||||
c->qpu_inst_count * sizeof(uint64_t));
|
||||
shader->failed = c->failed;
|
||||
if (c->failed) {
|
||||
shader->failed = true;
|
||||
} else {
|
||||
copy_uniform_state_to_shader(shader, c);
|
||||
shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
|
||||
c->qpu_inst_count *
|
||||
sizeof(uint64_t));
|
||||
}
|
||||
|
||||
/* Copy the compiler UBO range state to the compiled shader, dropping
|
||||
* out arrays that were never referenced by an indirect load.
|
||||
|
@@ -2693,11 +2699,15 @@ vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
|
|||
}
|
||||
}
|
||||
|
||||
void
|
||||
bool
|
||||
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
|
||||
{
|
||||
vc4_update_compiled_fs(vc4, prim_mode);
|
||||
vc4_update_compiled_vs(vc4, prim_mode);
|
||||
|
||||
return !(vc4->prog.cs->failed ||
|
||||
vc4->prog.vs->failed ||
|
||||
vc4->prog.fs->failed);
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
|
|
|
@@ -523,6 +523,7 @@ struct vc4_compile {
|
|||
|
||||
uint32_t program_id;
|
||||
uint32_t variant_id;
|
||||
bool failed;
|
||||
};
|
||||
|
||||
/* Special nir_load_input intrinsic index for loading the current TLB
|
||||
|
|
|
@@ -565,10 +565,13 @@ vc4_generate_code_block(struct vc4_compile *c,
|
|||
void
|
||||
vc4_generate_code(struct vc4_context *vc4, struct vc4_compile *c)
|
||||
{
|
||||
struct qpu_reg *temp_registers = vc4_register_allocate(vc4, c);
|
||||
struct qblock *start_block = list_first_entry(&c->blocks,
|
||||
struct qblock, link);
|
||||
|
||||
struct qpu_reg *temp_registers = vc4_register_allocate(vc4, c);
|
||||
if (!temp_registers)
|
||||
return;
|
||||
|
||||
switch (c->stage) {
|
||||
case QSTAGE_VERT:
|
||||
case QSTAGE_COORD:
|
||||
|
|
|
@@ -323,7 +323,8 @@ vc4_register_allocate(struct vc4_context *vc4, struct vc4_compile *c)
|
|||
if (!ok) {
|
||||
fprintf(stderr, "Failed to register allocate:\n");
|
||||
qir_dump(c);
|
||||
abort();
|
||||
c->failed = true;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < c->num_temps; i++) {
|
||||
|
|
Loading…
Reference in New Issue