util,nir: Move mask_sign_extend from opt_load_store_vectorize to util

While we're moving it, reformat a bit to make it match util_sign_extend
better.

Reviewed-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Reviewed-by: Kristian H. Kristensen <hoegsberg@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17214>
commit efc63ea02d (parent a307bc8556)
Jason Ekstrand, 2022-06-23 12:55:40 -05:00, committed by Marge Bot
2 changed files with 19 additions and 12 deletions

src/compiler/nir/nir_opt_load_store_vectorize.c

@@ -360,17 +360,11 @@ type_scalar_size_bytes(const struct glsl_type *type)
    return glsl_type_is_boolean(type) ? 4u : glsl_get_bit_size(type) / 8u;
 }
 
-static uint64_t
-mask_sign_extend(uint64_t val, unsigned bit_size)
-{
-   return (int64_t)(val << (64 - bit_size)) >> (64 - bit_size);
-}
-
 static unsigned
 add_to_entry_key(nir_ssa_scalar *offset_defs, uint64_t *offset_defs_mul,
                  unsigned offset_def_count, nir_ssa_scalar def, uint64_t mul)
 {
-   mul = mask_sign_extend(mul, def.def->bit_size);
+   mul = util_mask_sign_extend(mul, def.def->bit_size);
 
    for (unsigned i = 0; i <= offset_def_count; i++) {
       if (i == offset_def_count || def.def->index > offset_defs[i].def->index) {
@@ -437,7 +431,7 @@ create_entry_key_from_deref(void *mem_ctx,
       nir_ssa_scalar base = {.def=index, .comp=0};
       uint64_t offset = 0, base_mul = 1;
       parse_offset(&base, &base_mul, &offset);
-      offset = mask_sign_extend(offset, index->bit_size);
+      offset = util_mask_sign_extend(offset, index->bit_size);
 
       *offset_base += offset * stride;
       if (base.def) {
@@ -609,7 +603,7 @@ create_entry(struct vectorize_ctx *ctx,
       entry->offset = offset;
 
       if (base)
-         entry->offset = mask_sign_extend(entry->offset, base->bit_size);
+         entry->offset = util_mask_sign_extend(entry->offset, base->bit_size);
    }
 
    if (entry->info->resource_src >= 0)

src/util/u_math.h

@@ -579,6 +579,21 @@ util_bswap16(uint16_t n)
           (n << 8);
 }
 
+/**
+ * Mask and sign-extend a number
+ *
+ * The bit at position `width - 1` is replicated to all the higher bits.
+ * This makes no assumptions about the high bits of the value and will
+ * overwrite them with the sign bit.
+ */
+static inline int64_t
+util_mask_sign_extend(uint64_t val, unsigned width)
+{
+   assert(width > 0 && width <= 64);
+   unsigned shift = 64 - width;
+   return (int64_t)(val << shift) >> shift;
+}
+
 /**
  * Sign-extend a number
  *
@@ -588,10 +603,8 @@ util_bswap16(uint16_t n)
 static inline int64_t
 util_sign_extend(uint64_t val, unsigned width)
 {
-   assert(width > 0 && width <= 64);
    assert(width == 64 || val < (UINT64_C(1) << width));
-   unsigned shift = 64 - width;
-   return (int64_t)(val << shift) >> shift;
+   return util_mask_sign_extend(val, width);
 }
 
 static inline void*
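
To illustrate the difference between the two helpers (not part of the commit, just a sketch built on the functions above): util_sign_extend asserts that every bit above `width` is already zero, while util_mask_sign_extend discards those bits first, so it is safe on values whose high bits are stale. That is the situation in the load/store vectorizer, which carries offsets and multipliers of varying bit sizes around in a uint64_t.

#include <assert.h>
#include <stdint.h>
#include "util/u_math.h"

int main(void)
{
   /* 0xFF is -1 as an 8-bit two's-complement value; bit 7 (the sign
    * bit) is replicated into bits 8..63. */
   assert(util_mask_sign_extend(0xFF, 8) == -1);

   /* Stale bits above the 8-bit field are masked away before the
    * sign bit is replicated. */
   assert(util_mask_sign_extend(UINT64_C(0xDEADBEEF00000042), 8) == 0x42);

   /* util_sign_extend(0xDEADBEEF00000042, 8) would instead trip its
    * assert(width == 64 || val < (UINT64_C(1) << width)); it is only
    * valid when the high bits are already clear. */
   assert(util_sign_extend(0x7F, 8) == 0x7F);

   return 0;
}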