v3d: rename VC5 enums and definitions

As the driver was renamed in the past from VC5 to V3D, let's also rename
the definitions and enumerations to keep them consistent across the code.

Reviewed-by: Alejandro Piñeiro <apinheiro@igalia.com>
Signed-off-by: Juan A. Suarez Romero <jasuarez@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10402>
This commit is contained in:
Juan A. Suarez Romero 2021-04-21 18:43:14 +02:00
parent 3c318e6335
commit 14b66e27dc
18 changed files with 250 additions and 250 deletions

View File

@ -245,7 +245,7 @@ v3d_tfu(struct pipe_context *pctx,
return false;
/* Can't write to raster. */
if (dst_base_slice->tiling == VC5_TILING_RASTER)
if (dst_base_slice->tiling == V3D_TILING_RASTER)
return false;
/* When using TFU for blit, we are doing exact copies (both input and
@ -288,12 +288,12 @@ v3d_tfu(struct pipe_context *pctx,
uint32_t src_offset = (src->bo->offset +
v3d_layer_offset(psrc, src_level, src_layer));
tfu.iia |= src_offset;
if (src_base_slice->tiling == VC5_TILING_RASTER) {
if (src_base_slice->tiling == V3D_TILING_RASTER) {
tfu.icfg |= (V3D_TFU_ICFG_FORMAT_RASTER <<
V3D_TFU_ICFG_FORMAT_SHIFT);
} else {
tfu.icfg |= ((V3D_TFU_ICFG_FORMAT_LINEARTILE +
(src_base_slice->tiling - VC5_TILING_LINEARTILE)) <<
(src_base_slice->tiling - V3D_TILING_LINEARTILE)) <<
V3D_TFU_ICFG_FORMAT_SHIFT);
}
@ -303,24 +303,24 @@ v3d_tfu(struct pipe_context *pctx,
if (last_level != base_level)
tfu.ioa |= V3D_TFU_IOA_DIMTW;
tfu.ioa |= ((V3D_TFU_IOA_FORMAT_LINEARTILE +
(dst_base_slice->tiling - VC5_TILING_LINEARTILE)) <<
(dst_base_slice->tiling - V3D_TILING_LINEARTILE)) <<
V3D_TFU_IOA_FORMAT_SHIFT);
tfu.icfg |= tex_format << V3D_TFU_ICFG_TTYPE_SHIFT;
tfu.icfg |= (last_level - base_level) << V3D_TFU_ICFG_NUMMM_SHIFT;
switch (src_base_slice->tiling) {
case VC5_TILING_UIF_NO_XOR:
case VC5_TILING_UIF_XOR:
case V3D_TILING_UIF_NO_XOR:
case V3D_TILING_UIF_XOR:
tfu.iis |= (src_base_slice->padded_height /
(2 * v3d_utile_height(src->cpp)));
break;
case VC5_TILING_RASTER:
case V3D_TILING_RASTER:
tfu.iis |= src_base_slice->stride / src->cpp;
break;
case VC5_TILING_LINEARTILE:
case VC5_TILING_UBLINEAR_1_COLUMN:
case VC5_TILING_UBLINEAR_2_COLUMN:
case V3D_TILING_LINEARTILE:
case V3D_TILING_UBLINEAR_1_COLUMN:
case V3D_TILING_UBLINEAR_2_COLUMN:
break;
}
@ -329,8 +329,8 @@ v3d_tfu(struct pipe_context *pctx,
* those necessary to cover the height). When filling mipmaps, the
* miplevel 1+ tiling state is inferred.
*/
if (dst_base_slice->tiling == VC5_TILING_UIF_NO_XOR ||
dst_base_slice->tiling == VC5_TILING_UIF_XOR) {
if (dst_base_slice->tiling == V3D_TILING_UIF_NO_XOR ||
dst_base_slice->tiling == V3D_TILING_UIF_XOR) {
int uif_block_h = 2 * v3d_utile_height(dst->cpp);
int implicit_padded_height = align(height, uif_block_h);

View File

@ -278,7 +278,7 @@ cl_get_emit_space(struct v3d_cl_out **cl, size_t size)
* Helper function called by the XML-generated pack functions for filling in
* an address field in shader records.
*
* Since we have a private address space as of VC5, our BOs can have lifelong
* Since we have a private address space as of V3D, our BOs can have lifelong
* offsets, and all the kernel needs to know is which BOs need to be paged in
* for this exec.
*/

View File

@ -200,16 +200,16 @@ v3d_flag_dirty_sampler_state(struct v3d_context *v3d,
{
switch (shader) {
case PIPE_SHADER_VERTEX:
v3d->dirty |= VC5_DIRTY_VERTTEX;
v3d->dirty |= V3D_DIRTY_VERTTEX;
break;
case PIPE_SHADER_GEOMETRY:
v3d->dirty |= VC5_DIRTY_GEOMTEX;
v3d->dirty |= V3D_DIRTY_GEOMTEX;
break;
case PIPE_SHADER_FRAGMENT:
v3d->dirty |= VC5_DIRTY_FRAGTEX;
v3d->dirty |= V3D_DIRTY_FRAGTEX;
break;
case PIPE_SHADER_COMPUTE:
v3d->dirty |= VC5_DIRTY_COMPTEX;
v3d->dirty |= V3D_DIRTY_COMPTEX;
break;
default:
unreachable("Unsupported shader stage");

View File

@ -56,48 +56,48 @@ void v3d_job_add_bo(struct v3d_job *job, struct v3d_bo *bo);
#define using_v3d_simulator false
#endif
#define VC5_DIRTY_BLEND (1ull << 0)
#define VC5_DIRTY_RASTERIZER (1ull << 1)
#define VC5_DIRTY_ZSA (1ull << 2)
#define VC5_DIRTY_COMPTEX (1ull << 3)
#define VC5_DIRTY_VERTTEX (1ull << 4)
#define VC5_DIRTY_GEOMTEX (1ull << 5)
#define VC5_DIRTY_FRAGTEX (1ull << 6)
#define V3D_DIRTY_BLEND (1ull << 0)
#define V3D_DIRTY_RASTERIZER (1ull << 1)
#define V3D_DIRTY_ZSA (1ull << 2)
#define V3D_DIRTY_COMPTEX (1ull << 3)
#define V3D_DIRTY_VERTTEX (1ull << 4)
#define V3D_DIRTY_GEOMTEX (1ull << 5)
#define V3D_DIRTY_FRAGTEX (1ull << 6)
#define VC5_DIRTY_SHADER_IMAGE (1ull << 9)
#define VC5_DIRTY_BLEND_COLOR (1ull << 10)
#define VC5_DIRTY_STENCIL_REF (1ull << 11)
#define VC5_DIRTY_SAMPLE_STATE (1ull << 12)
#define VC5_DIRTY_FRAMEBUFFER (1ull << 13)
#define VC5_DIRTY_STIPPLE (1ull << 14)
#define VC5_DIRTY_VIEWPORT (1ull << 15)
#define VC5_DIRTY_CONSTBUF (1ull << 16)
#define VC5_DIRTY_VTXSTATE (1ull << 17)
#define VC5_DIRTY_VTXBUF (1ull << 18)
#define VC5_DIRTY_SCISSOR (1ull << 19)
#define VC5_DIRTY_FLAT_SHADE_FLAGS (1ull << 20)
#define VC5_DIRTY_PRIM_MODE (1ull << 21)
#define VC5_DIRTY_CLIP (1ull << 22)
#define VC5_DIRTY_UNCOMPILED_CS (1ull << 23)
#define VC5_DIRTY_UNCOMPILED_VS (1ull << 24)
#define VC5_DIRTY_UNCOMPILED_GS (1ull << 25)
#define VC5_DIRTY_UNCOMPILED_FS (1ull << 26)
#define V3D_DIRTY_SHADER_IMAGE (1ull << 9)
#define V3D_DIRTY_BLEND_COLOR (1ull << 10)
#define V3D_DIRTY_STENCIL_REF (1ull << 11)
#define V3D_DIRTY_SAMPLE_STATE (1ull << 12)
#define V3D_DIRTY_FRAMEBUFFER (1ull << 13)
#define V3D_DIRTY_STIPPLE (1ull << 14)
#define V3D_DIRTY_VIEWPORT (1ull << 15)
#define V3D_DIRTY_CONSTBUF (1ull << 16)
#define V3D_DIRTY_VTXSTATE (1ull << 17)
#define V3D_DIRTY_VTXBUF (1ull << 18)
#define V3D_DIRTY_SCISSOR (1ull << 19)
#define V3D_DIRTY_FLAT_SHADE_FLAGS (1ull << 20)
#define V3D_DIRTY_PRIM_MODE (1ull << 21)
#define V3D_DIRTY_CLIP (1ull << 22)
#define V3D_DIRTY_UNCOMPILED_CS (1ull << 23)
#define V3D_DIRTY_UNCOMPILED_VS (1ull << 24)
#define V3D_DIRTY_UNCOMPILED_GS (1ull << 25)
#define V3D_DIRTY_UNCOMPILED_FS (1ull << 26)
#define VC5_DIRTY_COMPILED_CS (1ull << 29)
#define VC5_DIRTY_COMPILED_VS (1ull << 30)
#define VC5_DIRTY_COMPILED_GS_BIN (1ULL << 31)
#define VC5_DIRTY_COMPILED_GS (1ULL << 32)
#define VC5_DIRTY_COMPILED_FS (1ull << 33)
#define V3D_DIRTY_COMPILED_CS (1ull << 29)
#define V3D_DIRTY_COMPILED_VS (1ull << 30)
#define V3D_DIRTY_COMPILED_GS_BIN (1ULL << 31)
#define V3D_DIRTY_COMPILED_GS (1ULL << 32)
#define V3D_DIRTY_COMPILED_FS (1ull << 33)
#define VC5_DIRTY_FS_INPUTS (1ull << 38)
#define VC5_DIRTY_GS_INPUTS (1ull << 39)
#define VC5_DIRTY_STREAMOUT (1ull << 40)
#define VC5_DIRTY_OQ (1ull << 41)
#define VC5_DIRTY_CENTROID_FLAGS (1ull << 42)
#define VC5_DIRTY_NOPERSPECTIVE_FLAGS (1ull << 43)
#define VC5_DIRTY_SSBO (1ull << 44)
#define V3D_DIRTY_FS_INPUTS (1ull << 38)
#define V3D_DIRTY_GS_INPUTS (1ull << 39)
#define V3D_DIRTY_STREAMOUT (1ull << 40)
#define V3D_DIRTY_OQ (1ull << 41)
#define V3D_DIRTY_CENTROID_FLAGS (1ull << 42)
#define V3D_DIRTY_NOPERSPECTIVE_FLAGS (1ull << 43)
#define V3D_DIRTY_SSBO (1ull << 44)
#define VC5_MAX_FS_INPUTS 64
#define V3D_MAX_FS_INPUTS 64
enum v3d_sampler_state_variant {
V3D_SAMPLER_STATE_BORDER_0,
@ -219,7 +219,7 @@ struct v3d_compiled_shader {
} prog_data;
/**
* VC5_DIRTY_* flags that, when set in v3d->dirty, mean that the
* V3D_DIRTY_* flags that, when set in v3d->dirty, mean that the
* uniforms have to be rewritten (and therefore the shader state
* reemitted).
*/
@ -284,10 +284,10 @@ struct v3d_job_key {
};
enum v3d_ez_state {
VC5_EZ_UNDECIDED = 0,
VC5_EZ_GT_GE,
VC5_EZ_LT_LE,
VC5_EZ_DISABLED,
V3D_EZ_UNDECIDED = 0,
V3D_EZ_GT_GE,
V3D_EZ_LT_LE,
V3D_EZ_DISABLED,
};
struct v3d_image_view {
@ -359,7 +359,7 @@ struct v3d_job {
/** @} */
/** @{
* Width/height of the color framebuffer being rendered to,
* for VC5_TILE_RENDERING_MODE_CONFIG.
* for V3D_TILE_RENDERING_MODE_CONFIG.
*/
uint32_t draw_width;
uint32_t draw_height;
@ -398,7 +398,7 @@ struct v3d_job {
/**
* Set if some drawing (triangles, blits, or just a glClear()) has
* been done to the FBO, meaning that we need to
* DRM_IOCTL_VC5_SUBMIT_CL.
* DRM_IOCTL_V3D_SUBMIT_CL.
*/
bool needs_flush;
@ -462,7 +462,7 @@ struct v3d_context {
struct slab_child_pool transfer_pool;
struct blitter_context *blitter;
/** bitfield of VC5_DIRTY_* */
/** bitfield of V3D_DIRTY_* */
uint64_t dirty;
struct primconvert_context *primconvert;

View File

@ -24,7 +24,7 @@
/**
* @file v3d_formats.c
*
* Contains the table and accessors for VC5 texture and render target format
* Contains the table and accessors for V3D texture and render target format
* support.
*
* The hardware has limited support for texture formats, and extremely limited

View File

@ -23,7 +23,7 @@
/** @file v3d_job.c
*
* Functions for submitting VC5 render jobs to the kernel.
* Functions for submitting V3D render jobs to the kernel.
*/
#include <xf86drm.h>

View File

@ -523,14 +523,14 @@ v3d_update_compiled_fs(struct v3d_context *v3d, uint8_t prim_mode)
struct v3d_fs_key *key = &local_key;
nir_shader *s = v3d->prog.bind_fs->base.ir.nir;
if (!(v3d->dirty & (VC5_DIRTY_PRIM_MODE |
VC5_DIRTY_BLEND |
VC5_DIRTY_FRAMEBUFFER |
VC5_DIRTY_ZSA |
VC5_DIRTY_RASTERIZER |
VC5_DIRTY_SAMPLE_STATE |
VC5_DIRTY_FRAGTEX |
VC5_DIRTY_UNCOMPILED_FS))) {
if (!(v3d->dirty & (V3D_DIRTY_PRIM_MODE |
V3D_DIRTY_BLEND |
V3D_DIRTY_FRAMEBUFFER |
V3D_DIRTY_ZSA |
V3D_DIRTY_RASTERIZER |
V3D_DIRTY_SAMPLE_STATE |
V3D_DIRTY_FRAGTEX |
V3D_DIRTY_UNCOMPILED_FS))) {
return;
}
@ -609,29 +609,29 @@ v3d_update_compiled_fs(struct v3d_context *v3d, uint8_t prim_mode)
if (v3d->prog.fs == old_fs)
return;
v3d->dirty |= VC5_DIRTY_COMPILED_FS;
v3d->dirty |= V3D_DIRTY_COMPILED_FS;
if (old_fs) {
if (v3d->prog.fs->prog_data.fs->flat_shade_flags !=
old_fs->prog_data.fs->flat_shade_flags) {
v3d->dirty |= VC5_DIRTY_FLAT_SHADE_FLAGS;
v3d->dirty |= V3D_DIRTY_FLAT_SHADE_FLAGS;
}
if (v3d->prog.fs->prog_data.fs->noperspective_flags !=
old_fs->prog_data.fs->noperspective_flags) {
v3d->dirty |= VC5_DIRTY_NOPERSPECTIVE_FLAGS;
v3d->dirty |= V3D_DIRTY_NOPERSPECTIVE_FLAGS;
}
if (v3d->prog.fs->prog_data.fs->centroid_flags !=
old_fs->prog_data.fs->centroid_flags) {
v3d->dirty |= VC5_DIRTY_CENTROID_FLAGS;
v3d->dirty |= V3D_DIRTY_CENTROID_FLAGS;
}
}
if (old_fs && memcmp(v3d->prog.fs->prog_data.fs->input_slots,
old_fs->prog_data.fs->input_slots,
sizeof(v3d->prog.fs->prog_data.fs->input_slots))) {
v3d->dirty |= VC5_DIRTY_FS_INPUTS;
v3d->dirty |= V3D_DIRTY_FS_INPUTS;
}
}
@ -641,11 +641,11 @@ v3d_update_compiled_gs(struct v3d_context *v3d, uint8_t prim_mode)
struct v3d_gs_key local_key;
struct v3d_gs_key *key = &local_key;
if (!(v3d->dirty & (VC5_DIRTY_GEOMTEX |
VC5_DIRTY_RASTERIZER |
VC5_DIRTY_UNCOMPILED_GS |
VC5_DIRTY_PRIM_MODE |
VC5_DIRTY_FS_INPUTS))) {
if (!(v3d->dirty & (V3D_DIRTY_GEOMTEX |
V3D_DIRTY_RASTERIZER |
V3D_DIRTY_UNCOMPILED_GS |
V3D_DIRTY_PRIM_MODE |
V3D_DIRTY_FS_INPUTS))) {
return;
}
@ -674,7 +674,7 @@ v3d_update_compiled_gs(struct v3d_context *v3d, uint8_t prim_mode)
v3d_get_compiled_shader(v3d, &key->base, sizeof(*key));
if (gs != v3d->prog.gs) {
v3d->prog.gs = gs;
v3d->dirty |= VC5_DIRTY_COMPILED_GS;
v3d->dirty |= V3D_DIRTY_COMPILED_GS;
}
key->is_coord = true;
@ -699,13 +699,13 @@ v3d_update_compiled_gs(struct v3d_context *v3d, uint8_t prim_mode)
v3d_get_compiled_shader(v3d, &key->base, sizeof(*key));
if (gs_bin != old_gs) {
v3d->prog.gs_bin = gs_bin;
v3d->dirty |= VC5_DIRTY_COMPILED_GS_BIN;
v3d->dirty |= V3D_DIRTY_COMPILED_GS_BIN;
}
if (old_gs && memcmp(v3d->prog.gs->prog_data.gs->input_slots,
old_gs->prog_data.gs->input_slots,
sizeof(v3d->prog.gs->prog_data.gs->input_slots))) {
v3d->dirty |= VC5_DIRTY_GS_INPUTS;
v3d->dirty |= V3D_DIRTY_GS_INPUTS;
}
}
@ -715,13 +715,13 @@ v3d_update_compiled_vs(struct v3d_context *v3d, uint8_t prim_mode)
struct v3d_vs_key local_key;
struct v3d_vs_key *key = &local_key;
if (!(v3d->dirty & (VC5_DIRTY_VERTTEX |
VC5_DIRTY_VTXSTATE |
VC5_DIRTY_UNCOMPILED_VS |
(v3d->prog.bind_gs ? 0 : VC5_DIRTY_RASTERIZER) |
(v3d->prog.bind_gs ? 0 : VC5_DIRTY_PRIM_MODE) |
(v3d->prog.bind_gs ? VC5_DIRTY_GS_INPUTS :
VC5_DIRTY_FS_INPUTS)))) {
if (!(v3d->dirty & (V3D_DIRTY_VERTTEX |
V3D_DIRTY_VTXSTATE |
V3D_DIRTY_UNCOMPILED_VS |
(v3d->prog.bind_gs ? 0 : V3D_DIRTY_RASTERIZER) |
(v3d->prog.bind_gs ? 0 : V3D_DIRTY_PRIM_MODE) |
(v3d->prog.bind_gs ? V3D_DIRTY_GS_INPUTS :
V3D_DIRTY_FS_INPUTS)))) {
return;
}
@ -776,7 +776,7 @@ v3d_update_compiled_vs(struct v3d_context *v3d, uint8_t prim_mode)
v3d_get_compiled_shader(v3d, &key->base, sizeof(*key));
if (vs != v3d->prog.vs) {
v3d->prog.vs = vs;
v3d->dirty |= VC5_DIRTY_COMPILED_VS;
v3d->dirty |= V3D_DIRTY_COMPILED_VS;
}
key->is_coord = true;
@ -814,7 +814,7 @@ v3d_update_compiled_vs(struct v3d_context *v3d, uint8_t prim_mode)
v3d_get_compiled_shader(v3d, &key->base, sizeof(*key));
if (cs != v3d->prog.cs) {
v3d->prog.cs = cs;
v3d->dirty |= VC5_DIRTY_COMPILED_CS;
v3d->dirty |= V3D_DIRTY_COMPILED_CS;
}
}
@ -832,8 +832,8 @@ v3d_update_compiled_cs(struct v3d_context *v3d)
struct v3d_key local_key;
struct v3d_key *key = &local_key;
if (!(v3d->dirty & (VC5_DIRTY_UNCOMPILED_CS |
VC5_DIRTY_COMPTEX))) {
if (!(v3d->dirty & (V3D_DIRTY_UNCOMPILED_CS |
V3D_DIRTY_COMPTEX))) {
return;
}
@ -845,7 +845,7 @@ v3d_update_compiled_cs(struct v3d_context *v3d)
v3d_get_compiled_shader(v3d, key, sizeof(*key));
if (cs != v3d->prog.compute) {
v3d->prog.compute = cs;
v3d->dirty |= VC5_DIRTY_COMPILED_CS; /* XXX */
v3d->dirty |= V3D_DIRTY_COMPILED_CS; /* XXX */
}
}
@ -933,7 +933,7 @@ v3d_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
struct v3d_context *v3d = v3d_context(pctx);
v3d->prog.bind_fs = hwcso;
v3d->dirty |= VC5_DIRTY_UNCOMPILED_FS;
v3d->dirty |= V3D_DIRTY_UNCOMPILED_FS;
}
static void
@ -941,7 +941,7 @@ v3d_gp_state_bind(struct pipe_context *pctx, void *hwcso)
{
struct v3d_context *v3d = v3d_context(pctx);
v3d->prog.bind_gs = hwcso;
v3d->dirty |= VC5_DIRTY_UNCOMPILED_GS;
v3d->dirty |= V3D_DIRTY_UNCOMPILED_GS;
}
static void
@ -949,7 +949,7 @@ v3d_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
struct v3d_context *v3d = v3d_context(pctx);
v3d->prog.bind_vs = hwcso;
v3d->dirty |= VC5_DIRTY_UNCOMPILED_VS;
v3d->dirty |= V3D_DIRTY_UNCOMPILED_VS;
}
static void
@ -958,7 +958,7 @@ v3d_compute_state_bind(struct pipe_context *pctx, void *state)
struct v3d_context *v3d = v3d_context(pctx);
v3d->prog.bind_compute = state;
v3d->dirty |= VC5_DIRTY_UNCOMPILED_CS;
v3d->dirty |= V3D_DIRTY_UNCOMPILED_CS;
}
static void *

View File

@ -97,7 +97,7 @@ v3d_begin_query(struct pipe_context *pctx, struct pipe_query *query)
*map = 0;
v3d->current_oq = q->bo;
v3d->dirty |= VC5_DIRTY_OQ;
v3d->dirty |= V3D_DIRTY_OQ;
break;
default:
unreachable("unsupported query type");
@ -135,7 +135,7 @@ v3d_end_query(struct pipe_context *pctx, struct pipe_query *query)
case PIPE_QUERY_OCCLUSION_PREDICATE:
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
v3d->current_oq = NULL;
v3d->dirty |= VC5_DIRTY_OQ;
v3d->dirty |= V3D_DIRTY_OQ;
break;
default:
unreachable("unsupported query type");
@ -195,8 +195,8 @@ v3d_set_active_query_state(struct pipe_context *pctx, bool enable)
struct v3d_context *v3d = v3d_context(pctx);
v3d->active_queries = enable;
v3d->dirty |= VC5_DIRTY_OQ;
v3d->dirty |= VC5_DIRTY_STREAMOUT;
v3d->dirty |= V3D_DIRTY_OQ;
v3d->dirty |= V3D_DIRTY_STREAMOUT;
}
void

View File

@ -59,12 +59,12 @@ v3d_debug_resource_layout(struct v3d_resource *rsc, const char *caller)
}
static const char *const tiling_descriptions[] = {
[VC5_TILING_RASTER] = "R",
[VC5_TILING_LINEARTILE] = "LT",
[VC5_TILING_UBLINEAR_1_COLUMN] = "UB1",
[VC5_TILING_UBLINEAR_2_COLUMN] = "UB2",
[VC5_TILING_UIF_NO_XOR] = "UIF",
[VC5_TILING_UIF_XOR] = "UIF^",
[V3D_TILING_RASTER] = "R",
[V3D_TILING_LINEARTILE] = "LT",
[V3D_TILING_UBLINEAR_1_COLUMN] = "UB1",
[V3D_TILING_UBLINEAR_2_COLUMN] = "UB2",
[V3D_TILING_UIF_NO_XOR] = "UIF",
[V3D_TILING_UIF_XOR] = "UIF^",
};
for (int i = 0; i <= prsc->last_level; i++) {
@ -184,9 +184,9 @@ v3d_map_usage_prep(struct pipe_context *pctx,
* or uniforms.
*/
if (prsc->bind & PIPE_BIND_VERTEX_BUFFER)
v3d->dirty |= VC5_DIRTY_VTXBUF;
v3d->dirty |= V3D_DIRTY_VTXBUF;
if (prsc->bind & PIPE_BIND_CONSTANT_BUFFER)
v3d->dirty |= VC5_DIRTY_CONSTBUF;
v3d->dirty |= V3D_DIRTY_CONSTBUF;
if (prsc->bind & PIPE_BIND_SAMPLER_VIEW)
rebind_sampler_views(v3d, rsc);
} else {
@ -421,8 +421,8 @@ v3d_resource_get_handle(struct pipe_screen *pscreen,
/* A shared tiled buffer should always be allocated as UIF,
* not UBLINEAR or LT.
*/
assert(rsc->slices[0].tiling == VC5_TILING_UIF_XOR ||
rsc->slices[0].tiling == VC5_TILING_UIF_NO_XOR);
assert(rsc->slices[0].tiling == V3D_TILING_UIF_XOR ||
rsc->slices[0].tiling == V3D_TILING_UIF_NO_XOR);
whandle->modifier = DRM_FORMAT_MOD_BROADCOM_UIF;
} else {
whandle->modifier = DRM_FORMAT_MOD_LINEAR;
@ -448,9 +448,9 @@ v3d_resource_get_handle(struct pipe_screen *pscreen,
return false;
}
#define PAGE_UB_ROWS (VC5_UIFCFG_PAGE_SIZE / VC5_UIFBLOCK_ROW_SIZE)
#define PAGE_UB_ROWS (V3D_UIFCFG_PAGE_SIZE / V3D_UIFBLOCK_ROW_SIZE)
#define PAGE_UB_ROWS_TIMES_1_5 ((PAGE_UB_ROWS * 3) >> 1)
#define PAGE_CACHE_UB_ROWS (VC5_PAGE_CACHE_SIZE / VC5_UIFBLOCK_ROW_SIZE)
#define PAGE_CACHE_UB_ROWS (V3D_PAGE_CACHE_SIZE / V3D_UIFBLOCK_ROW_SIZE)
#define PAGE_CACHE_MINUS_1_5_UB_ROWS (PAGE_CACHE_UB_ROWS - PAGE_UB_ROWS_TIMES_1_5)
/**
@ -555,24 +555,24 @@ v3d_setup_slices(struct v3d_resource *rsc, uint32_t winsys_stride,
level_height = DIV_ROUND_UP(level_height, block_height);
if (!rsc->tiled) {
slice->tiling = VC5_TILING_RASTER;
slice->tiling = V3D_TILING_RASTER;
if (prsc->target == PIPE_TEXTURE_1D)
level_width = align(level_width, 64 / rsc->cpp);
} else {
if ((i != 0 || !uif_top) &&
(level_width <= utile_w ||
level_height <= utile_h)) {
slice->tiling = VC5_TILING_LINEARTILE;
slice->tiling = V3D_TILING_LINEARTILE;
level_width = align(level_width, utile_w);
level_height = align(level_height, utile_h);
} else if ((i != 0 || !uif_top) &&
level_width <= uif_block_w) {
slice->tiling = VC5_TILING_UBLINEAR_1_COLUMN;
slice->tiling = V3D_TILING_UBLINEAR_1_COLUMN;
level_width = align(level_width, uif_block_w);
level_height = align(level_height, uif_block_h);
} else if ((i != 0 || !uif_top) &&
level_width <= 2 * uif_block_w) {
slice->tiling = VC5_TILING_UBLINEAR_2_COLUMN;
slice->tiling = V3D_TILING_UBLINEAR_2_COLUMN;
level_width = align(level_width, 2 * uif_block_w);
level_height = align(level_height, uif_block_h);
} else {
@ -595,11 +595,11 @@ v3d_setup_slices(struct v3d_resource *rsc, uint32_t winsys_stride,
* perfectly misaligned
*/
if ((level_height / uif_block_h) %
(VC5_PAGE_CACHE_SIZE /
VC5_UIFBLOCK_ROW_SIZE) == 0) {
slice->tiling = VC5_TILING_UIF_XOR;
(V3D_PAGE_CACHE_SIZE /
V3D_UIFBLOCK_ROW_SIZE) == 0) {
slice->tiling = V3D_TILING_UIF_XOR;
} else {
slice->tiling = VC5_TILING_UIF_NO_XOR;
slice->tiling = V3D_TILING_UIF_NO_XOR;
}
}
}
@ -623,7 +623,7 @@ v3d_setup_slices(struct v3d_resource *rsc, uint32_t winsys_stride,
level_width > 4 * uif_block_w &&
level_height > PAGE_CACHE_MINUS_1_5_UB_ROWS * uif_block_h) {
slice_total_size = align(slice_total_size,
VC5_UIFCFG_PAGE_SIZE);
V3D_UIFCFG_PAGE_SIZE);
}
offset += slice_total_size;
@ -1076,8 +1076,8 @@ v3d_create_surface(struct pipe_context *pctx,
surface->internal_bpp = bpp;
}
if (surface->tiling == VC5_TILING_UIF_NO_XOR ||
surface->tiling == VC5_TILING_UIF_XOR) {
if (surface->tiling == V3D_TILING_UIF_NO_XOR ||
surface->tiling == V3D_TILING_UIF_XOR) {
surface->padded_height_of_output_image_in_uif_blocks =
(slice->padded_height /
(2 * v3d_utile_height(rsc->cpp)));

View File

@ -43,26 +43,26 @@
*/
enum v3d_tiling_mode {
/* Untiled resources. Not valid as texture inputs. */
VC5_TILING_RASTER,
V3D_TILING_RASTER,
/* Single line of u-tiles. */
VC5_TILING_LINEARTILE,
V3D_TILING_LINEARTILE,
/* Departure from standard 4-UIF block column format. */
VC5_TILING_UBLINEAR_1_COLUMN,
V3D_TILING_UBLINEAR_1_COLUMN,
/* Departure from standard 4-UIF block column format. */
VC5_TILING_UBLINEAR_2_COLUMN,
V3D_TILING_UBLINEAR_2_COLUMN,
/* Normal tiling format: grouped in 4x4 UIFblocks, each of which is
* split 2x2 into utiles.
*/
VC5_TILING_UIF_NO_XOR,
V3D_TILING_UIF_NO_XOR,
/* Normal tiling format: grouped in 4x4 UIFblocks, each of which is
* split 2x2 into utiles.
*/
VC5_TILING_UIF_XOR,
V3D_TILING_UIF_XOR,
};
struct v3d_transfer {

View File

@ -38,13 +38,13 @@ struct v3d_bo;
/* These are tunable parameters in the HW design, but all the V3D
* implementations agree.
*/
#define VC5_UIFCFG_BANKS 8
#define VC5_UIFCFG_PAGE_SIZE 4096
#define VC5_UIFCFG_XOR_VALUE (1 << 4)
#define VC5_PAGE_CACHE_SIZE (VC5_UIFCFG_PAGE_SIZE * VC5_UIFCFG_BANKS)
#define VC5_UBLOCK_SIZE 64
#define VC5_UIFBLOCK_SIZE (4 * VC5_UBLOCK_SIZE)
#define VC5_UIFBLOCK_ROW_SIZE (4 * VC5_UIFBLOCK_SIZE)
#define V3D_UIFCFG_BANKS 8
#define V3D_UIFCFG_PAGE_SIZE 4096
#define V3D_UIFCFG_XOR_VALUE (1 << 4)
#define V3D_PAGE_CACHE_SIZE (V3D_UIFCFG_PAGE_SIZE * V3D_UIFCFG_BANKS)
#define V3D_UBLOCK_SIZE 64
#define V3D_UIFBLOCK_SIZE (4 * V3D_UBLOCK_SIZE)
#define V3D_UIFBLOCK_ROW_SIZE (4 * V3D_UIFBLOCK_SIZE)
struct v3d_simulator_file;

View File

@ -23,7 +23,7 @@
/** @file v3d_tiling.c
*
* Handles information about the VC5 tiling formats, and loading and storing
* Handles information about the V3D tiling formats, and loading and storing
* from them.
*/
@ -149,7 +149,7 @@ v3d_get_ublinear_1_column_pixel_offset(uint32_t cpp, uint32_t image_h,
/**
* Returns the byte offset for a given pixel in a UIF layout.
*
* UIF is the general VC5 tiling layout shared across 3D, media, and scanout.
* UIF is the general V3D tiling layout shared across 3D, media, and scanout.
* It stores pixels in UIF blocks (2x2 utiles), and UIF blocks are stored in
* 4x4 groups, and those 4x4 groups are then stored in raster order.
*/
@ -412,35 +412,35 @@ v3d_move_tiled_image(void *gpu, uint32_t gpu_stride,
bool is_load)
{
switch (tiling_format) {
case VC5_TILING_UIF_XOR:
case V3D_TILING_UIF_XOR:
v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
v3d_get_uif_xor_pixel_offset,
is_load);
break;
case VC5_TILING_UIF_NO_XOR:
case V3D_TILING_UIF_NO_XOR:
v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
v3d_get_uif_no_xor_pixel_offset,
is_load);
break;
case VC5_TILING_UBLINEAR_2_COLUMN:
case V3D_TILING_UBLINEAR_2_COLUMN:
v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
v3d_get_ublinear_2_column_pixel_offset,
is_load);
break;
case VC5_TILING_UBLINEAR_1_COLUMN:
case V3D_TILING_UBLINEAR_1_COLUMN:
v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
v3d_get_ublinear_1_column_pixel_offset,
is_load);
break;
case VC5_TILING_LINEARTILE:
case V3D_TILING_LINEARTILE:
v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,

View File

@ -417,18 +417,18 @@ v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader)
break;
case QUNIFORM_UNIFORM:
case QUNIFORM_UBO_ADDR:
dirty |= VC5_DIRTY_CONSTBUF;
dirty |= V3D_DIRTY_CONSTBUF;
break;
case QUNIFORM_VIEWPORT_X_SCALE:
case QUNIFORM_VIEWPORT_Y_SCALE:
case QUNIFORM_VIEWPORT_Z_OFFSET:
case QUNIFORM_VIEWPORT_Z_SCALE:
dirty |= VC5_DIRTY_VIEWPORT;
dirty |= V3D_DIRTY_VIEWPORT;
break;
case QUNIFORM_USER_CLIP_PLANE:
dirty |= VC5_DIRTY_CLIP;
dirty |= V3D_DIRTY_CLIP;
break;
case QUNIFORM_TMU_CONFIG_P0:
@ -447,13 +447,13 @@ v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader)
/* We could flag this on just the stage we're
* compiling for, but it's not passed in.
*/
dirty |= VC5_DIRTY_FRAGTEX | VC5_DIRTY_VERTTEX |
VC5_DIRTY_GEOMTEX | VC5_DIRTY_COMPTEX;
dirty |= V3D_DIRTY_FRAGTEX | V3D_DIRTY_VERTTEX |
V3D_DIRTY_GEOMTEX | V3D_DIRTY_COMPTEX;
break;
case QUNIFORM_SSBO_OFFSET:
case QUNIFORM_GET_SSBO_SIZE:
dirty |= VC5_DIRTY_SSBO;
dirty |= V3D_DIRTY_SSBO;
break;
case QUNIFORM_IMAGE_TMU_CONFIG_P0:
@ -461,12 +461,12 @@ v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader)
case QUNIFORM_IMAGE_HEIGHT:
case QUNIFORM_IMAGE_DEPTH:
case QUNIFORM_IMAGE_ARRAY_SIZE:
dirty |= VC5_DIRTY_SHADER_IMAGE;
dirty |= V3D_DIRTY_SHADER_IMAGE;
break;
case QUNIFORM_LINE_WIDTH:
case QUNIFORM_AA_LINE_WIDTH:
dirty |= VC5_DIRTY_RASTERIZER;
dirty |= V3D_DIRTY_RASTERIZER;
break;
case QUNIFORM_NUM_WORK_GROUPS:
@ -475,13 +475,13 @@ v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader)
break;
case QUNIFORM_FB_LAYERS:
dirty |= VC5_DIRTY_FRAMEBUFFER;
dirty |= V3D_DIRTY_FRAMEBUFFER;
break;
default:
assert(quniform_contents_is_texture_p0(shader->prog_data.base->uniforms.contents[i]));
dirty |= VC5_DIRTY_FRAGTEX | VC5_DIRTY_VERTTEX |
VC5_DIRTY_GEOMTEX | VC5_DIRTY_COMPTEX;
dirty |= V3D_DIRTY_FRAGTEX | V3D_DIRTY_VERTTEX |
V3D_DIRTY_GEOMTEX | V3D_DIRTY_COMPTEX;
break;
}
}

View File

@ -635,9 +635,9 @@ v3d_emit_gl_shader_state(struct v3d_context *v3d,
const struct pipe_draw_info *info)
{
struct v3d_job *job = v3d->job;
/* VC5_DIRTY_VTXSTATE */
/* V3D_DIRTY_VTXSTATE */
struct v3d_vertex_stateobj *vtx = v3d->vtx;
/* VC5_DIRTY_VTXBUF */
/* V3D_DIRTY_VTXBUF */
struct v3d_vertexbuf_stateobj *vertexbuf = &v3d->vertexbuf;
/* Upload the uniforms to the indirect CL first */
@ -760,7 +760,7 @@ v3d_emit_gl_shader_state(struct v3d_context *v3d,
cl_emit(&job->indirect, GL_SHADER_STATE_RECORD, shader) {
shader.enable_clipping = true;
/* VC5_DIRTY_PRIM_MODE | VC5_DIRTY_RASTERIZER */
/* V3D_DIRTY_PRIM_MODE | V3D_DIRTY_RASTERIZER */
shader.point_size_in_shaded_vertex_data =
(info->mode == PIPE_PRIM_POINTS &&
v3d->rasterizer->base.point_size_per_vertex);
@ -1012,30 +1012,30 @@ static void
v3d_update_job_ez(struct v3d_context *v3d, struct v3d_job *job)
{
switch (v3d->zsa->ez_state) {
case VC5_EZ_UNDECIDED:
case V3D_EZ_UNDECIDED:
/* If the Z/S state didn't pick a direction but didn't
* disable, then go along with the current EZ state. This
* allows EZ optimization for Z func == EQUAL or NEVER.
*/
break;
case VC5_EZ_LT_LE:
case VC5_EZ_GT_GE:
case V3D_EZ_LT_LE:
case V3D_EZ_GT_GE:
/* If the Z/S state picked a direction, then it needs to match
* the current direction if we've decided on one.
*/
if (job->ez_state == VC5_EZ_UNDECIDED)
if (job->ez_state == V3D_EZ_UNDECIDED)
job->ez_state = v3d->zsa->ez_state;
else if (job->ez_state != v3d->zsa->ez_state)
job->ez_state = VC5_EZ_DISABLED;
job->ez_state = V3D_EZ_DISABLED;
break;
case VC5_EZ_DISABLED:
case V3D_EZ_DISABLED:
/* If the current Z/S state disables EZ because of a bad Z
* func or stencil operation, then we can't do any more EZ in
* this frame.
*/
job->ez_state = VC5_EZ_DISABLED;
job->ez_state = V3D_EZ_DISABLED;
break;
}
@ -1044,11 +1044,11 @@ v3d_update_job_ez(struct v3d_context *v3d, struct v3d_job *job)
* ARB_conservative_depth's hints to avoid this)
*/
if (v3d->prog.fs->prog_data.fs->writes_z) {
job->ez_state = VC5_EZ_DISABLED;
job->ez_state = V3D_EZ_DISABLED;
}
if (job->first_ez_state == VC5_EZ_UNDECIDED &&
(job->ez_state != VC5_EZ_DISABLED || job->draw_calls_queued == 0))
if (job->first_ez_state == V3D_EZ_UNDECIDED &&
(job->ez_state != V3D_EZ_DISABLED || job->draw_calls_queued == 0))
job->first_ez_state = job->ez_state;
}
@ -1224,7 +1224,7 @@ v3d_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
if (v3d->prim_mode != info->mode) {
v3d->prim_mode = info->mode;
v3d->dirty |= VC5_DIRTY_PRIM_MODE;
v3d->dirty |= V3D_DIRTY_PRIM_MODE;
}
v3d_start_draw(v3d);
@ -1249,15 +1249,15 @@ v3d_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
v3d33_emit_state(pctx);
#endif
if (v3d->dirty & (VC5_DIRTY_VTXBUF |
VC5_DIRTY_VTXSTATE |
VC5_DIRTY_PRIM_MODE |
VC5_DIRTY_RASTERIZER |
VC5_DIRTY_COMPILED_CS |
VC5_DIRTY_COMPILED_VS |
VC5_DIRTY_COMPILED_GS_BIN |
VC5_DIRTY_COMPILED_GS |
VC5_DIRTY_COMPILED_FS |
if (v3d->dirty & (V3D_DIRTY_VTXBUF |
V3D_DIRTY_VTXSTATE |
V3D_DIRTY_PRIM_MODE |
V3D_DIRTY_RASTERIZER |
V3D_DIRTY_COMPILED_CS |
V3D_DIRTY_COMPILED_VS |
V3D_DIRTY_COMPILED_GS_BIN |
V3D_DIRTY_COMPILED_GS |
V3D_DIRTY_COMPILED_FS |
v3d->prog.cs->uniform_dirty_bits |
v3d->prog.vs->uniform_dirty_bits |
(v3d->prog.gs_bin ?

View File

@ -88,7 +88,7 @@ swizzled_border_color(const struct v3d_device_info *devinfo,
uint8_t swiz = chan;
/* If we're doing swizzling in the sampler, then only rearrange the
* border color for the mismatch between the VC5 texture format and
* border color for the mismatch between the V3D texture format and
* the PIPE_FORMAT, since GL_ARB_texture_swizzle will be handled by
* the sampler's swizzle.
*
@ -417,8 +417,8 @@ v3dX(emit_state)(struct pipe_context *pctx)
struct v3d_job *job = v3d->job;
bool rasterizer_discard = v3d->rasterizer->base.rasterizer_discard;
if (v3d->dirty & (VC5_DIRTY_SCISSOR | VC5_DIRTY_VIEWPORT |
VC5_DIRTY_RASTERIZER)) {
if (v3d->dirty & (V3D_DIRTY_SCISSOR | V3D_DIRTY_VIEWPORT |
V3D_DIRTY_RASTERIZER)) {
float *vpscale = v3d->viewport.scale;
float *vptranslate = v3d->viewport.translate;
float vp_minx = -fabsf(vpscale[0]) + vptranslate[0];
@ -470,10 +470,10 @@ v3dX(emit_state)(struct pipe_context *pctx)
job->draw_max_y = MAX2(job->draw_max_y, maxy);
}
if (v3d->dirty & (VC5_DIRTY_RASTERIZER |
VC5_DIRTY_ZSA |
VC5_DIRTY_BLEND |
VC5_DIRTY_COMPILED_FS)) {
if (v3d->dirty & (V3D_DIRTY_RASTERIZER |
V3D_DIRTY_ZSA |
V3D_DIRTY_BLEND |
V3D_DIRTY_COMPILED_FS)) {
cl_emit(&job->bcl, CFG_BITS, config) {
config.enable_forward_facing_primitive =
!rasterizer_discard &&
@ -513,7 +513,7 @@ v3dX(emit_state)(struct pipe_context *pctx)
* along with ZSA
*/
config.early_z_updates_enable =
(job->ez_state != VC5_EZ_DISABLED);
(job->ez_state != V3D_EZ_DISABLED);
if (v3d->zsa->base.depth_enabled) {
config.z_updates_enable =
v3d->zsa->base.depth_writemask;
@ -537,7 +537,7 @@ v3dX(emit_state)(struct pipe_context *pctx)
}
if (v3d->dirty & VC5_DIRTY_RASTERIZER &&
if (v3d->dirty & V3D_DIRTY_RASTERIZER &&
v3d->rasterizer->base.offset_tri) {
if (job->zsbuf &&
job->zsbuf->format == PIPE_FORMAT_Z16_UNORM) {
@ -551,7 +551,7 @@ v3dX(emit_state)(struct pipe_context *pctx)
}
}
if (v3d->dirty & VC5_DIRTY_RASTERIZER) {
if (v3d->dirty & V3D_DIRTY_RASTERIZER) {
cl_emit(&job->bcl, POINT_SIZE, point_size) {
point_size.point_size = v3d->rasterizer->point_size;
}
@ -561,7 +561,7 @@ v3dX(emit_state)(struct pipe_context *pctx)
}
}
if (v3d->dirty & VC5_DIRTY_VIEWPORT) {
if (v3d->dirty & V3D_DIRTY_VIEWPORT) {
cl_emit(&job->bcl, CLIPPER_XY_SCALING, clip) {
clip.viewport_half_width_in_1_256th_of_pixel =
v3d->viewport.scale[0] * 256.0f;
@ -592,7 +592,7 @@ v3dX(emit_state)(struct pipe_context *pctx)
}
}
if (v3d->dirty & VC5_DIRTY_BLEND) {
if (v3d->dirty & V3D_DIRTY_BLEND) {
struct v3d_blend_state *blend = v3d->blend;
if (blend->blend_enables) {
@ -611,7 +611,7 @@ v3dX(emit_state)(struct pipe_context *pctx)
}
}
if (v3d->dirty & VC5_DIRTY_BLEND) {
if (v3d->dirty & V3D_DIRTY_BLEND) {
struct pipe_blend_state *blend = &v3d->blend->base;
cl_emit(&job->bcl, COLOR_WRITE_MASKS, mask) {
@ -628,8 +628,8 @@ v3dX(emit_state)(struct pipe_context *pctx)
/* GFXH-1431: On V3D 3.x, writing BLEND_CONFIG resets the constant
* color.
*/
if (v3d->dirty & VC5_DIRTY_BLEND_COLOR ||
(V3D_VERSION < 41 && (v3d->dirty & VC5_DIRTY_BLEND))) {
if (v3d->dirty & V3D_DIRTY_BLEND_COLOR ||
(V3D_VERSION < 41 && (v3d->dirty & V3D_DIRTY_BLEND))) {
cl_emit(&job->bcl, BLEND_CONSTANT_COLOR, color) {
color.red_f16 = (v3d->swap_color_rb ?
v3d->blend_color.hf[2] :
@ -642,7 +642,7 @@ v3dX(emit_state)(struct pipe_context *pctx)
}
}
if (v3d->dirty & (VC5_DIRTY_ZSA | VC5_DIRTY_STENCIL_REF)) {
if (v3d->dirty & (V3D_DIRTY_ZSA | V3D_DIRTY_STENCIL_REF)) {
struct pipe_stencil_state *front = &v3d->zsa->base.stencil[0];
struct pipe_stencil_state *back = &v3d->zsa->base.stencil[1];
@ -667,17 +667,17 @@ v3dX(emit_state)(struct pipe_context *pctx)
/* Pre-4.x, we have texture state that depends on both the sampler and
* the view, so we merge them together at draw time.
*/
if (v3d->dirty & VC5_DIRTY_FRAGTEX)
if (v3d->dirty & V3D_DIRTY_FRAGTEX)
emit_textures(v3d, &v3d->tex[PIPE_SHADER_FRAGMENT]);
if (v3d->dirty & VC5_DIRTY_GEOMTEX)
if (v3d->dirty & V3D_DIRTY_GEOMTEX)
emit_textures(v3d, &v3d->tex[PIPE_SHADER_GEOMETRY]);
if (v3d->dirty & VC5_DIRTY_VERTTEX)
if (v3d->dirty & V3D_DIRTY_VERTTEX)
emit_textures(v3d, &v3d->tex[PIPE_SHADER_VERTEX]);
#endif
if (v3d->dirty & VC5_DIRTY_FLAT_SHADE_FLAGS) {
if (v3d->dirty & V3D_DIRTY_FLAT_SHADE_FLAGS) {
if (!emit_varying_flags(job,
v3d->prog.fs->prog_data.fs->flat_shade_flags,
emit_flat_shade_flags)) {
@ -686,7 +686,7 @@ v3dX(emit_state)(struct pipe_context *pctx)
}
#if V3D_VERSION >= 40
if (v3d->dirty & VC5_DIRTY_NOPERSPECTIVE_FLAGS) {
if (v3d->dirty & V3D_DIRTY_NOPERSPECTIVE_FLAGS) {
if (!emit_varying_flags(job,
v3d->prog.fs->prog_data.fs->noperspective_flags,
emit_noperspective_flags)) {
@ -694,7 +694,7 @@ v3dX(emit_state)(struct pipe_context *pctx)
}
}
if (v3d->dirty & VC5_DIRTY_CENTROID_FLAGS) {
if (v3d->dirty & V3D_DIRTY_CENTROID_FLAGS) {
if (!emit_varying_flags(job,
v3d->prog.fs->prog_data.fs->centroid_flags,
emit_centroid_flags)) {
@ -706,9 +706,9 @@ v3dX(emit_state)(struct pipe_context *pctx)
/* Set up the transform feedback data specs (which VPM entries to
* output to which buffers).
*/
if (v3d->dirty & (VC5_DIRTY_STREAMOUT |
VC5_DIRTY_RASTERIZER |
VC5_DIRTY_PRIM_MODE)) {
if (v3d->dirty & (V3D_DIRTY_STREAMOUT |
V3D_DIRTY_RASTERIZER |
V3D_DIRTY_PRIM_MODE)) {
struct v3d_streamout_stateobj *so = &v3d->streamout;
if (so->num_targets) {
bool psiz_per_vertex = (v3d->prim_mode == PIPE_PRIM_POINTS &&
@ -749,7 +749,7 @@ v3dX(emit_state)(struct pipe_context *pctx)
}
/* Set up the transform feedback buffers. */
if (v3d->dirty & VC5_DIRTY_STREAMOUT) {
if (v3d->dirty & V3D_DIRTY_STREAMOUT) {
struct v3d_uncompiled_shader *tf_shader = get_tf_shader(v3d);
struct v3d_streamout_stateobj *so = &v3d->streamout;
for (int i = 0; i < so->num_targets; i++) {
@ -793,7 +793,7 @@ v3dX(emit_state)(struct pipe_context *pctx)
}
}
if (v3d->dirty & VC5_DIRTY_OQ) {
if (v3d->dirty & V3D_DIRTY_OQ) {
cl_emit(&job->bcl, OCCLUSION_QUERY_COUNTER, counter) {
if (v3d->active_queries && v3d->current_oq) {
counter.address = cl_address(v3d->current_oq, 0);
@ -802,7 +802,7 @@ v3dX(emit_state)(struct pipe_context *pctx)
}
#if V3D_VERSION >= 40
if (v3d->dirty & VC5_DIRTY_SAMPLE_STATE) {
if (v3d->dirty & V3D_DIRTY_SAMPLE_STATE) {
cl_emit(&job->bcl, SAMPLE_STATE, state) {
/* Note: SampleCoverage was handled at the
* frontend level by converting to sample_mask.

View File

@ -23,7 +23,7 @@
/** @file v3dx_job.c
*
* V3D version-specific functions for submitting VC5 render jobs to the
* V3D version-specific functions for submitting V3D render jobs to the
* kernel.
*/

View File

@ -79,11 +79,11 @@ load_general(struct v3d_cl *cl, struct pipe_surface *psurf, int buffer,
load.input_image_format = surf->format;
load.r_b_swap = surf->swap_rb;
load.force_alpha_1 = util_format_has_alpha1(psurf->format);
if (surf->tiling == VC5_TILING_UIF_NO_XOR ||
surf->tiling == VC5_TILING_UIF_XOR) {
if (surf->tiling == V3D_TILING_UIF_NO_XOR ||
surf->tiling == V3D_TILING_UIF_XOR) {
load.height_in_ub_or_stride =
surf->padded_height_of_output_image_in_uif_blocks;
} else if (surf->tiling == VC5_TILING_RASTER) {
} else if (surf->tiling == V3D_TILING_RASTER) {
struct v3d_resource_slice *slice =
&rsc->slices[psurf->u.tex.level];
load.height_in_ub_or_stride = slice->stride;
@ -149,11 +149,11 @@ store_general(struct v3d_job *job,
store.r_b_swap = surf->swap_rb;
store.memory_format = surf->tiling;
if (surf->tiling == VC5_TILING_UIF_NO_XOR ||
surf->tiling == VC5_TILING_UIF_XOR) {
if (surf->tiling == V3D_TILING_UIF_NO_XOR ||
surf->tiling == V3D_TILING_UIF_XOR) {
store.height_in_ub_or_stride =
surf->padded_height_of_output_image_in_uif_blocks;
} else if (surf->tiling == VC5_TILING_RASTER) {
} else if (surf->tiling == V3D_TILING_RASTER) {
struct v3d_resource_slice *slice =
&rsc->slices[psurf->u.tex.level];
store.height_in_ub_or_stride = slice->stride;
@ -518,7 +518,7 @@ v3d_emit_z_stencil_config(struct v3d_job *job, struct v3d_surface *surf,
zs.padded_height_of_output_image_in_uif_blocks =
surf->padded_height_of_output_image_in_uif_blocks;
assert(surf->tiling != VC5_TILING_RASTER);
assert(surf->tiling != V3D_TILING_RASTER);
zs.memory_format = surf->tiling;
}
@ -679,18 +679,18 @@ v3dX(emit_rcl)(struct v3d_job *job)
/* XXX: Early D/S clear */
switch (job->first_ez_state) {
case VC5_EZ_UNDECIDED:
case VC5_EZ_LT_LE:
case V3D_EZ_UNDECIDED:
case V3D_EZ_LT_LE:
config.early_z_disable = false;
config.early_z_test_and_update_direction =
EARLY_Z_DIRECTION_LT_LE;
break;
case VC5_EZ_GT_GE:
case V3D_EZ_GT_GE:
config.early_z_disable = false;
config.early_z_test_and_update_direction =
EARLY_Z_DIRECTION_GT_GE;
break;
case VC5_EZ_DISABLED:
case V3D_EZ_DISABLED:
config.early_z_disable = true;
}
@ -715,8 +715,8 @@ v3dX(emit_rcl)(struct v3d_job *job)
uint32_t clear_pad = 0;
/* XXX: Set the pad for raster. */
if (surf->tiling == VC5_TILING_UIF_NO_XOR ||
surf->tiling == VC5_TILING_UIF_XOR) {
if (surf->tiling == V3D_TILING_UIF_NO_XOR ||
surf->tiling == V3D_TILING_UIF_XOR) {
int uif_block_height = v3d_utile_height(rsc->cpp) * 2;
uint32_t implicit_padded_height = (align(job->draw_height, uif_block_height) /
uif_block_height);

View File

@ -54,7 +54,7 @@ v3d_set_blend_color(struct pipe_context *pctx,
v3d->blend_color.hf[i] =
_mesa_float_to_half(blend_color->color[i]);
}
v3d->dirty |= VC5_DIRTY_BLEND_COLOR;
v3d->dirty |= V3D_DIRTY_BLEND_COLOR;
}
static void
@ -63,7 +63,7 @@ v3d_set_stencil_ref(struct pipe_context *pctx,
{
struct v3d_context *v3d = v3d_context(pctx);
v3d->stencil_ref = stencil_ref;
v3d->dirty |= VC5_DIRTY_STENCIL_REF;
v3d->dirty |= V3D_DIRTY_STENCIL_REF;
}
static void
@ -72,7 +72,7 @@ v3d_set_clip_state(struct pipe_context *pctx,
{
struct v3d_context *v3d = v3d_context(pctx);
v3d->clip = *clip;
v3d->dirty |= VC5_DIRTY_CLIP;
v3d->dirty |= V3D_DIRTY_CLIP;
}
static void
@ -80,7 +80,7 @@ v3d_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
struct v3d_context *v3d = v3d_context(pctx);
v3d->sample_mask = sample_mask & ((1 << V3D_MAX_SAMPLES) - 1);
v3d->dirty |= VC5_DIRTY_SAMPLE_STATE;
v3d->dirty |= V3D_DIRTY_SAMPLE_STATE;
}
static void *
@ -179,18 +179,18 @@ v3d_create_depth_stencil_alpha_state(struct pipe_context *pctx,
switch (cso->depth_func) {
case PIPE_FUNC_LESS:
case PIPE_FUNC_LEQUAL:
so->ez_state = VC5_EZ_LT_LE;
so->ez_state = V3D_EZ_LT_LE;
break;
case PIPE_FUNC_GREATER:
case PIPE_FUNC_GEQUAL:
so->ez_state = VC5_EZ_GT_GE;
so->ez_state = V3D_EZ_GT_GE;
break;
case PIPE_FUNC_NEVER:
case PIPE_FUNC_EQUAL:
so->ez_state = VC5_EZ_UNDECIDED;
so->ez_state = V3D_EZ_UNDECIDED;
break;
default:
so->ez_state = VC5_EZ_DISABLED;
so->ez_state = V3D_EZ_DISABLED;
break;
}
@ -203,7 +203,7 @@ v3d_create_depth_stencil_alpha_state(struct pipe_context *pctx,
(cso->stencil[1].enabled &&
(cso->stencil[1].zfail_op != PIPE_STENCIL_OP_KEEP &&
cso->stencil[1].func != PIPE_FUNC_ALWAYS)))) {
so->ez_state = VC5_EZ_DISABLED;
so->ez_state = V3D_EZ_DISABLED;
}
}
@ -261,7 +261,7 @@ v3d_set_polygon_stipple(struct pipe_context *pctx,
{
struct v3d_context *v3d = v3d_context(pctx);
v3d->stipple = *stipple;
v3d->dirty |= VC5_DIRTY_STIPPLE;
v3d->dirty |= V3D_DIRTY_STIPPLE;
}
static void
@ -273,7 +273,7 @@ v3d_set_scissor_states(struct pipe_context *pctx,
struct v3d_context *v3d = v3d_context(pctx);
v3d->scissor = *scissor;
v3d->dirty |= VC5_DIRTY_SCISSOR;
v3d->dirty |= V3D_DIRTY_SCISSOR;
}
static void
@ -284,7 +284,7 @@ v3d_set_viewport_states(struct pipe_context *pctx,
{
struct v3d_context *v3d = v3d_context(pctx);
v3d->viewport = *viewport;
v3d->dirty |= VC5_DIRTY_VIEWPORT;
v3d->dirty |= V3D_DIRTY_VIEWPORT;
}
static void
@ -303,7 +303,7 @@ v3d_set_vertex_buffers(struct pipe_context *pctx,
take_ownership);
so->count = util_last_bit(so->enabled_mask);
v3d->dirty |= VC5_DIRTY_VTXBUF;
v3d->dirty |= V3D_DIRTY_VTXBUF;
}
static void
@ -311,7 +311,7 @@ v3d_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
struct v3d_context *v3d = v3d_context(pctx);
v3d->blend = hwcso;
v3d->dirty |= VC5_DIRTY_BLEND;
v3d->dirty |= V3D_DIRTY_BLEND;
}
static void
@ -319,7 +319,7 @@ v3d_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
struct v3d_context *v3d = v3d_context(pctx);
v3d->rasterizer = hwcso;
v3d->dirty |= VC5_DIRTY_RASTERIZER;
v3d->dirty |= V3D_DIRTY_RASTERIZER;
}
static void
@ -327,7 +327,7 @@ v3d_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
struct v3d_context *v3d = v3d_context(pctx);
v3d->zsa = hwcso;
v3d->dirty |= VC5_DIRTY_ZSA;
v3d->dirty |= V3D_DIRTY_ZSA;
}
static void *
@ -445,7 +445,7 @@ v3d_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
struct v3d_context *v3d = v3d_context(pctx);
v3d->vtx = hwcso;
v3d->dirty |= VC5_DIRTY_VTXSTATE;
v3d->dirty |= V3D_DIRTY_VTXSTATE;
}
static void
@ -469,7 +469,7 @@ v3d_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
so->enabled_mask |= 1 << index;
so->dirty_mask |= 1 << index;
v3d->dirty |= VC5_DIRTY_CONSTBUF;
v3d->dirty |= V3D_DIRTY_CONSTBUF;
}
static void
@ -505,7 +505,7 @@ v3d_set_framebuffer_state(struct pipe_context *pctx,
v3d->blend_dst_alpha_one |= 1 << i;
}
v3d->dirty |= VC5_DIRTY_FRAMEBUFFER;
v3d->dirty |= V3D_DIRTY_FRAMEBUFFER;
}
static enum V3DX(Wrap_Mode)
@ -872,9 +872,9 @@ v3d_setup_texture_shader_state(struct V3DX(TEXTURE_SHADER_STATE) *tex,
* that way.
*/
tex->level_0_is_strictly_uif =
(rsc->slices[0].tiling == VC5_TILING_UIF_XOR ||
rsc->slices[0].tiling == VC5_TILING_UIF_NO_XOR);
tex->level_0_xor_enable = (rsc->slices[0].tiling == VC5_TILING_UIF_XOR);
(rsc->slices[0].tiling == V3D_TILING_UIF_XOR ||
rsc->slices[0].tiling == V3D_TILING_UIF_NO_XOR);
tex->level_0_xor_enable = (rsc->slices[0].tiling == V3D_TILING_UIF_XOR);
if (tex->level_0_is_strictly_uif)
tex->level_0_ub_pad = rsc->slices[0].ub_pad;
@ -1243,7 +1243,7 @@ v3d_set_stream_output_targets(struct pipe_context *pctx,
&ctx->prim_counts);
}
ctx->dirty |= VC5_DIRTY_STREAMOUT;
ctx->dirty |= V3D_DIRTY_STREAMOUT;
}
static void
@ -1291,7 +1291,7 @@ v3d_set_shader_buffers(struct pipe_context *pctx,
so->enabled_mask &= ~mask;
}
v3d->dirty |= VC5_DIRTY_SSBO;
v3d->dirty |= V3D_DIRTY_SSBO;
}
static void
@ -1383,7 +1383,7 @@ v3d_set_shader_images(struct pipe_context *pctx,
so->enabled_mask &= ~(((1 << count) - 1) << start);
}
v3d->dirty |= VC5_DIRTY_SHADER_IMAGE;
v3d->dirty |= V3D_DIRTY_SHADER_IMAGE;
if (unbind_num_trailing_slots) {
v3d_set_shader_images(pctx, shader, start + count,