code cleanup heheh

Daniel Borca 2004-03-29 06:51:41 +00:00
parent 238693544c
commit 71c7c1feb6
9 changed files with 119 additions and 115 deletions

View File

@ -51,8 +51,29 @@ Linux:
Tuning:
-------
Compilation defines:
--------------------
FX_DEBUG
enable driver debug code
FX_TRAP_GLIDE
enable Glide trace code
FX_TC_NAPALM
map GL_COMPRESSED_RGB[A] to FXT1. This takes effect on Napalm
only (can coexist with FX_TC_NCC, but has higher priority)
FX_TC_NCC
map GL_COMPRESSED_RGB[A] to NCC. This takes effect on any 3dfx
HW (can coexist with FX_TC_NAPALM, but has lower priority)
FX_COMPRESS_S3TC_AS_FXT1_HACK
map S3TC to FXT1
FX_RESCALE_BIG_TEXURES_HACK
fake textures larger than HW can support
(see MESA_FX_MAXLOD environment variable)
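As a rough illustration of how the compile-time switches above gate behavior, here is a minimal, self-contained sketch of the FX_TC_NAPALM idea. The pickCompressedFormat() helper and the FMT_* constants are hypothetical stand-ins; only the GL enum values are the standard ones, and the real driver does considerably more than remap an enum.

    /* Illustrative sketch only: a compile-time define such as FX_TC_NAPALM
     * (e.g. gcc -DFX_TC_NAPALM=1) redirects generic compressed formats to a
     * specific hardware format.  The helper and FMT_* values are made up. */
    #include <stdio.h>

    #define GL_COMPRESSED_RGB   0x84ED   /* standard GL enum values */
    #define GL_COMPRESSED_RGBA  0x84EE
    #define FMT_FXT1            1        /* stand-in for the FXT1 format id */
    #define FMT_DEFAULT         0

    #ifndef FX_TC_NAPALM
    #define FX_TC_NAPALM 0               /* off unless set on the compiler line */
    #endif

    static int pickCompressedFormat(unsigned internalFormat)
    {
    #if FX_TC_NAPALM
        /* with -DFX_TC_NAPALM=1, generic compressed formats become FXT1 */
        if (internalFormat == GL_COMPRESSED_RGB ||
            internalFormat == GL_COMPRESSED_RGBA)
            return FMT_FXT1;
    #endif
        return FMT_DEFAULT;
    }

    int main(void)
    {
        printf("GL_COMPRESSED_RGB -> %d\n",
               pickCompressedFormat(GL_COMPRESSED_RGB));
        return 0;
    }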
Environment variables:
----------------------
The following environment variables affect MesaFX. Those that affect only
Glide are beyond the scope of this file. Entries that don't have a "Value"

View File

@ -386,14 +386,7 @@ fxMesaCreateContext(GLuint win,
Glide->txMipQuantize &&
Glide->txPalToNcc && !getenv("MESA_FX_IGNORE_TEXUS2");
/*
* Pixel tables are used during pixel read-back
* Either initialize them for RGB or BGR order;
* However, 32bit capable cards have the right order.
* As a consequence, 32bit read-back is not swizzled!
* Also determine if we need vertex snapping.
*/
/* number of SLI units and AA Samples per chip */
/* Determine if we need vertex swapping, RGB order and SLI/AA */
sliaa = 0;
switch (fxMesa->type) {
case GR_SSTTYPE_VOODOO:
@ -408,6 +401,7 @@ fxMesaCreateContext(GLuint win,
break;
case GR_SSTTYPE_Voodoo4:
case GR_SSTTYPE_Voodoo5:
/* number of SLI units and AA Samples per chip */
if ((str = Glide->grGetRegistryOrEnvironmentStringExt("SSTH3_SLI_AA_CONFIGURATION")) != NULL) {
sliaa = atoi(str);
}
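The SLI/AA setting above comes in as a string from the registry or the environment and is parsed with atoi(), falling back to 0 when nothing is configured. A minimal sketch of that lookup-or-default pattern, with getConfigString() standing in for Glide's grGetRegistryOrEnvironmentStringExt() (here it only consults the environment):

    /* Sketch of the "registry-or-environment string, else default" pattern.
     * getConfigString() is a stand-in: the real Glide call can also read
     * the setting from the registry. */
    #include <stdio.h>
    #include <stdlib.h>

    static const char *getConfigString(const char *key)
    {
        return getenv(key);              /* environment only, for the sketch */
    }

    int main(void)
    {
        int sliaa = 0;                   /* default when nothing is configured */
        const char *str = getConfigString("SSTH3_SLI_AA_CONFIGURATION");
        if (str != NULL) {
            sliaa = atoi(str);           /* numeric SLI/AA selection */
        }
        printf("sliaa = %d\n", sliaa);
        return 0;
    }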

View File

@ -1362,7 +1362,7 @@ fxDDInitFxMesaContext(fxMesaContext fxMesa)
textureLevels++;
} while ((textureSize >>= 0x1) & 0x7ff);
ctx->Const.MaxTextureLevels = textureLevels;
#if 1||FX_RESCALE_BIG_TEXURES
#if FX_RESCALE_BIG_TEXURES_HACK
fxMesa->textureMaxLod = textureLevels - 1;
if ((env = getenv("MESA_FX_MAXLOD")) != NULL) {
int maxLevels = atoi(env) + 1;
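The loop above derives the number of mipmap levels from the largest texture size the chip reports, and MESA_FX_MAXLOD then lets Mesa advertise more levels than the hardware really has (per the README, oversized textures are faked by rescaling later). Below is a sketch of that computation with example values; the raise-only comparison against Mesa's own level limit is an assumption here, since the hunk is cut off right after the atoi() line.

    /* Sketch: count HW mipmap levels (same do/while as above), then let
     * MESA_FX_MAXLOD raise the advertised count.  MAX_LEVELS and the
     * 256-texel example are illustrative. */
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_LEVELS 12                    /* stand-in for Mesa's own limit */

    int main(void)
    {
        int textureSize = 256;               /* e.g. a 256x256-capable chip */
        int textureLevels = 0;
        int textureMaxLod;
        const char *env;

        do {
            textureLevels++;
        } while ((textureSize >>= 1) & 0x7ff);   /* 256..1 -> 9 levels */

        textureMaxLod = textureLevels - 1;       /* what the HW can really do */

        if ((env = getenv("MESA_FX_MAXLOD")) != NULL) {
            int maxLevels = atoi(env) + 1;       /* LOD n means n+1 levels */
            if (maxLevels > textureLevels && maxLevels <= MAX_LEVELS)
                textureLevels = maxLevels;       /* advertise beyond the HW */
        }
        printf("advertised levels=%d, hw max LOD=%d\n",
               textureLevels, textureMaxLod);
        return 0;
    }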

View File

@ -1233,7 +1233,7 @@ fxDDTexImage2D(GLcontext * ctx, GLenum target, GLint level,
mml->width = width * mml->wScale;
mml->height = height * mml->hScale;
#if 0 && FX_COMPRESS_S3TC_AS_FXT1_HACK
#if FX_COMPRESS_S3TC_AS_FXT1_HACK
/* [koolsmoky] substitute FXT1 for DXTn and Legacy S3TC */
/* [dBorca] we should update texture's attribute, then,
* because if the application asks us to decompress, we
@ -1257,16 +1257,6 @@ fxDDTexImage2D(GLcontext * ctx, GLenum target, GLint level,
}
}
#endif
#if 0 && FX_COMPRESS_DXT5_AS_DXT3_HACK
/* [dBorca] either VSA is stupid at DXT5,
* or our compression tool is broken. See
* above for caveats.
*/
if ((texImage->IsCompressed) &&
(internalFormat == GL_COMPRESSED_RGBA_S3TC_DXT5_EXT)) {
internalFormat = GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
}
#endif
/* choose the texture format */
assert(ctx->Driver.ChooseTextureFormat);
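This hunk switches the S3TC-as-FXT1 substitution from "#if 0 && ..." to the real FX_COMPRESS_S3TC_AS_FXT1_HACK toggle and drops the DXT5-as-DXT3 experiment entirely. A minimal sketch of the substitution idea itself follows; the enum values are the standard S3TC/FXT1 extension values, but the helper is a stand-in and the real driver also has to patch up block sizes and the compressed-image bookkeeping.

    /* Illustrative only: if the application asks for an S3TC/DXTn internal
     * format, hand the hardware FXT1 instead. */
    #include <stdio.h>

    #define GL_COMPRESSED_RGB_S3TC_DXT1_EXT   0x83F0
    #define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT  0x83F1
    #define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT  0x83F2
    #define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT  0x83F3
    #define GL_COMPRESSED_RGB_FXT1_3DFX       0x86B0
    #define GL_COMPRESSED_RGBA_FXT1_3DFX      0x86B1

    static unsigned substituteS3TC(unsigned internalFormat)
    {
        switch (internalFormat) {
        case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
            return GL_COMPRESSED_RGB_FXT1_3DFX;     /* opaque -> RGB FXT1 */
        case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
        case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
        case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
            return GL_COMPRESSED_RGBA_FXT1_3DFX;    /* alpha -> RGBA FXT1 */
        default:
            return internalFormat;                  /* leave everything else alone */
        }
    }

    int main(void)
    {
        printf("0x%X -> 0x%X\n",
               GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,
               substituteS3TC(GL_COMPRESSED_RGBA_S3TC_DXT5_EXT));
        return 0;
    }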

View File

@ -730,4 +730,8 @@ extern int TDFX_DEBUG;
#define TDFX_DEBUG 0
#endif
/* dirty hacks */
#define FX_RESCALE_BIG_TEXURES_HACK 1
#define FX_COMPRESS_S3TC_AS_FXT1_HACK 0
#endif
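The two toggles above are the master switches for the hack code paths elsewhere in this commit; they are plain 0/1 values consumed by #if. A trivial, self-contained sketch of that gating (nothing here beyond what the defines already say):

    /* Minimal sketch: the header's 0/1 values decide at compile time
     * whether the guarded code exists at all. */
    #include <stdio.h>

    #define FX_RESCALE_BIG_TEXURES_HACK   1   /* values copied from the hunk */
    #define FX_COMPRESS_S3TC_AS_FXT1_HACK 0

    int main(void)
    {
    #if FX_RESCALE_BIG_TEXURES_HACK
        printf("big-texture rescaling compiled in\n");
    #endif
    #if FX_COMPRESS_S3TC_AS_FXT1_HACK
        printf("S3TC-as-FXT1 substitution compiled in\n");
    #else
        printf("S3TC-as-FXT1 substitution compiled out\n");
    #endif
        return 0;
    }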

View File

@ -38,7 +38,7 @@
#include <stdarg.h>
#include <assert.h>
#define DEBUG_TRAP_internal
#define FX_TRAP_GLIDE_internal
#include "fxg.h"
@ -46,7 +46,7 @@
/****************************************************************************\
* logging *
\****************************************************************************/
#if DEBUG_TRAP
#if FX_TRAP_GLIDE
#define TRAP_LOG trp_printf
#ifdef __GNUC__
__attribute__ ((format(printf, 1, 2)))
@ -66,17 +66,17 @@ int trp_printf (const char *format, ...)
va_end(arg);
return n;
}
#else /* DEBUG_TRAP */
#else /* FX_TRAP_GLIDE */
#ifdef __GNUC__
#define TRAP_LOG(format, ...) do {} while (0)
#else /* __GNUC__ */
#define TRAP_LOG 0 && (unsigned long)
#endif /* __GNUC__ */
#endif /* DEBUG_TRAP */
#endif /* FX_TRAP_GLIDE */
#if DEBUG_TRAP
#if FX_TRAP_GLIDE
/****************************************************************************\
* helpers *
\****************************************************************************/
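The logging macros in the hunk above are what FX_TRAP_GLIDE now controls: with tracing enabled TRAP_LOG is a real printf-style function, and with it disabled the macro has to make the whole call, arguments included, disappear. A standalone sketch of both no-op forms, using a generic LOG name:

    /* Sketch of the compiled-out logging trick.  With tracing off, GCC
     * gets an empty variadic macro; other compilers get the
     * "0 && (unsigned long)" form, which short-circuits so the argument
     * list is never evaluated at run time. */
    #include <stdio.h>

    #define TRACING 0                        /* stand-in for FX_TRAP_GLIDE */

    #if TRACING
    #define LOG printf
    #elif defined(__GNUC__)
    #define LOG(format, ...) do {} while (0)
    #else
    #define LOG 0 && (unsigned long)
    #endif

    int main(void)
    {
        int calls = 0;
        LOG("calls so far: %d\n", ++calls);  /* ++calls must not run when off */
        printf("calls = %d\n", calls);       /* 0 while tracing is compiled out */
        return 0;
    }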
@ -2242,15 +2242,15 @@ void FX_CALL fake_grTexNCCTableExt (GrChipID_t tmu,
\****************************************************************************/
void tdfx_hook_glide (struct tdfx_glide *Glide)
{
#if DEBUG_TRAP
#if FX_TRAP_GLIDE
#define GET_EXT_ADDR(name) *(GrProc *)&real_##name = grGetProcAddress(#name), Glide->name = trap_##name
#define GET_EXT_FAKE(name) GET_EXT_ADDR(name); if (real_##name == NULL) real_##name = fake_##name
#define GET_EXT_NULL(name) GET_EXT_ADDR(name); if (real_##name == NULL) Glide->name = NULL
#else /* DEBUG_TRAP */
#else /* FX_TRAP_GLIDE */
#define GET_EXT_ADDR(name) *(GrProc *)&Glide->name = grGetProcAddress(#name)
#define GET_EXT_FAKE(name) GET_EXT_ADDR(name); if (Glide->name == NULL) Glide->name = fake_##name
#define GET_EXT_NULL(name) GET_EXT_ADDR(name)
#endif /* DEBUG_TRAP */
#endif /* FX_TRAP_GLIDE */
/*
** glide extensions
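tdfx_hook_glide() resolves each Glide extension by name and, depending on FX_TRAP_GLIDE, either routes it through a trap_ logging wrapper or stores it straight into the dispatch struct, substituting a fake_ implementation when the entry point is missing. A sketch of the non-tracing variant; the dispatch struct, the lookup() function and the extension name are stand-ins for Glide->..., grGetProcAddress() and a real extension.

    /* Sketch of the extension-hooking macros, non-tracing variant: look a
     * function up by name, store it in a dispatch struct, and fall back to
     * a local fake_ implementation when it is missing. */
    #include <stdio.h>

    typedef void (*GrProc)(void);

    struct dispatch {
        void (*grSomethingExt)(int);         /* hypothetical extension entry */
    };

    static void fake_grSomethingExt(int x) { printf("fake(%d)\n", x); }

    static GrProc lookup(const char *name)   /* stand-in for grGetProcAddress */
    {
        (void)name;
        return NULL;                         /* pretend the extension is absent */
    }

    #define GET_EXT_ADDR(tab, name) *(GrProc *)&(tab)->name = lookup(#name)
    #define GET_EXT_FAKE(tab, name) GET_EXT_ADDR(tab, name); \
            if ((tab)->name == NULL) (tab)->name = fake_##name

    int main(void)
    {
        struct dispatch Glide;
        GET_EXT_FAKE(&Glide, grSomethingExt);   /* absent -> fake_ is installed */
        Glide.grSomethingExt(42);
        return 0;
    }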

View File

@ -37,9 +37,11 @@
#include <glide.h>
#include <g3ext.h>
#define DEBUG_TRAP 0
#ifndef FX_TRAP_GLIDE
#define FX_TRAP_GLIDE 0
#endif
#if DEBUG_TRAP
#if FX_TRAP_GLIDE
/*
** rendering functions
*/
@ -171,7 +173,7 @@ void FX_CALL trap_guFogGenerateExp (GrFog_t *fogtable, float density);
void FX_CALL trap_guFogGenerateExp2 (GrFog_t *fogtable, float density);
void FX_CALL trap_guFogGenerateLinear (GrFog_t *fogtable, float nearZ, float farZ);
#ifndef DEBUG_TRAP_internal
#ifndef FX_TRAP_GLIDE_internal
/*
** rendering functions
*/
@ -302,8 +304,8 @@ void FX_CALL trap_guFogGenerateLinear (GrFog_t *fogtable, float nearZ, float far
#define guFogGenerateExp trap_guFogGenerateExp
#define guFogGenerateExp2 trap_guFogGenerateExp2
#define guFogGenerateLinear trap_guFogGenerateLinear
#endif /* DEBUG_TRAP_internal */
#endif /* DEBUG_TRAP */
#endif /* FX_TRAP_GLIDE_internal */
#endif /* FX_TRAP_GLIDE */
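This header hunk replaces the hard-coded "DEBUG_TRAP 0" with an overridable FX_TRAP_GLIDE default and keeps the interception scheme: outside the trap implementation (which defines FX_TRAP_GLIDE_internal before including the header), every Glide call name is #defined to its trap_ wrapper. A standalone sketch of that rename-based interception with a hypothetical function:

    /* Sketch of the call-interception pattern: callers see real_func
     * renamed to trap_real_func, except inside the trap implementation,
     * which defines the _internal guard first.  Both names are made up. */
    #include <stdio.h>

    /* --- what the header would provide ---------------------------------- */
    #ifndef FX_TRAP_GLIDE
    #define FX_TRAP_GLIDE 1              /* default when the build doesn't say */
    #endif

    void trap_real_func(int x);          /* the logging wrapper's prototype */

    #if FX_TRAP_GLIDE
    #ifndef FX_TRAP_GLIDE_internal
    #define real_func trap_real_func     /* ordinary callers are rerouted */
    #endif
    #endif

    /* --- the wrapper and a caller ---------------------------------------- */
    void trap_real_func(int x)           /* in the driver this logs and then
                                            forwards to the real Glide entry;
                                            here it only logs */
    {
        printf("real_func(%d) called\n", x);
    }

    int main(void)
    {
        real_func(7);                    /* actually calls trap_real_func(7) */
        return 0;
    }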

View File

@ -65,85 +65,79 @@ fxTexValidate(GLcontext * ctx, struct gl_texture_object *tObj)
minl = ti->minLevel = tObj->BaseLevel;
maxl = ti->maxLevel = MIN2(tObj->MaxLevel, tObj->Image[0][0]->MaxLog2);
#if 1||FX_RESCALE_BIG_TEXURES
#if FX_RESCALE_BIG_TEXURES_HACK
{
extern void _mesa_rescale_teximage2d( GLuint bytesPerPixel,
GLuint dstRowStride,
GLint srcWidth, GLint srcHeight,
GLint dstWidth, GLint dstHeight,
const GLvoid *srcImage, GLvoid *dstImage );
fxMesaContext fxMesa = FX_CONTEXT(ctx);
if (maxl - minl > fxMesa->textureMaxLod) {
/* [dBorca]
* Ooooooook! Here's a(nother) long story.
* We get here because we need to handle a texture larger
* than hardware can support. Two cases:
* 1) we have mipmaps. Then we just push up to the first supported
* LOD. A possible drawback is that Mesa will ignore the skipped
* LODs on further texture handling.
* Will this interfere with GL_TEXTURE_[MIN|BASE]_LEVEL? How?
* 2) we don't have mipmaps. We need to rescale texture; two ways:
* a) create a new LOD and push up ti->minLevel and tObj->BaseLevel
* but this means we need to rescale on both axes, which
* yield unnecessary ugly texture. Also, same issues as 1)
* b) rescale the biggest LOD in place and go two ways:
* - update texImage->Width and texImage->Height, then
* decrease maxLevel, so we won't rescale again on the
* next validation. Changing texImage-> parameters is
* not quite legal here (see convolution), but...
* - leaving texImage-> parameters alone, while rescaling
* texture and decreasing maxLevel makes Mesa puke. Also
* this approach requires that mml->[wh]Scale go below 1,
* otherwise bad ju-ju will be in our future (see fetch_texel)
* Will this interfere with GL_TEXTURE_MAX_LEVEL? How?
* The above approach is somehow dumb! we might have rescaled
* once in TexImage2D to accomodate aspect ratio, and now we
* are rescaling again. The thing is, in TexImage2D we don't
* know whether we'll hit 1) or 2) by the time of validation.
* NB: we could handle mml->[wh]Scale nicely, using (biased) shifts.
*
* Which brings me to another issue. How can we handle NPOT textures?
* - rescaling NPOT to the next bigger POT (mml->[wh]Scale can't shift)
* - upping the max LOD to the next power-of-two, in fxTexGetInfo; then
* choosing non-power-of-two values for ti->[st]Scale... Anyhow, we
* still need to align mipmaps correctly in texture memory!
*/
if ((tObj->MinFilter == GL_NEAREST) || (tObj->MinFilter == GL_LINEAR)) {
/* no mipmaps! need to rescale */
struct gl_texture_image *texImage = tObj->Image[0][minl];
tfxMipMapLevel *mml = FX_MIPMAP_DATA(texImage);
GLint texelBytes = texImage->TexFormat->TexelBytes;
GLvoid *texImage_Data = texImage->Data;
GLint _w = MIN2(mml->width, 1 << fxMesa->textureMaxLod);
GLint _h = MIN2(mml->height, 1 << fxMesa->textureMaxLod);
if (TDFX_DEBUG & VERBOSE_TEXTURE) {
fprintf(stderr, "fxTexValidate: rescaling %d x %d -> %d x %d\n",
mml->width, mml->height,
_w, _h);
}
fxTexGetInfo(_w, _h, NULL, NULL, NULL, NULL,
&(mml->wScale), &(mml->hScale));
texImage->Width = _w / mml->wScale;
texImage->Height = _h / mml->hScale;
texImage->Data = MESA_PBUFFER_ALLOC(_w * _h * texelBytes);
_mesa_rescale_teximage2d(texelBytes,
_w * texelBytes, /* dst stride */
mml->width, mml->height, /* src */
_w, _h, /* dst */
texImage_Data /*src*/, texImage->Data /*dst*/ );
MESA_PBUFFER_FREE(texImage_Data);
mml->width = _w;
mml->height = _h;
maxl = ti->maxLevel = tObj->Image[0][0]->MaxLog2 = minl + fxMesa->textureMaxLod;
} else {
/* skip a certain number of LODs */
minl += maxl - fxMesa->textureMaxLod;
if (TDFX_DEBUG & VERBOSE_TEXTURE) {
fprintf(stderr, "fxTexValidate: skipping %d LODs\n", minl - ti->minLevel);
}
ti->minLevel = tObj->BaseLevel = minl;
}
}
extern void _mesa_rescale_teximage2d( GLuint bytesPerPixel,
GLuint dstRowStride,
GLint srcWidth, GLint srcHeight,
GLint dstWidth, GLint dstHeight,
const GLvoid *srcImage, GLvoid *dstImage );
fxMesaContext fxMesa = FX_CONTEXT(ctx);
/* [dBorca]
* Ooooooook! Here's a(nother) long story.
* We get here because we need to handle a texture larger
* than hardware can support. Two cases:
* 1) we have mipmaps. Then we just push up to the first supported
* LOD. A possible drawback is that Mesa will ignore the skipped
* LODs on further texture handling.
* Will this interfere with GL_TEXTURE_[MIN|BASE]_LEVEL? How?
* 2) we don't have mipmaps. We need to rescale the big LOD in place.
* The above approach is somehow dumb! we might have rescaled
* once in TexImage2D to accomodate aspect ratio, and now we
* are rescaling again. The thing is, in TexImage2D we don't
* know whether we'll hit 1) or 2) by the time of validation.
* NB: we could handle mml->[wh]Scale nicely, using (biased) shifts.
*
* Which brings me to another issue. How can we handle NPOT textures?
* - rescaling NPOT to the next bigger POT (mml->[wh]Scale can't shift)
* - upping the max LOD to the next power-of-two, in fxTexGetInfo; then
* choosing non-power-of-two values for ti->[st]Scale... Anyhow, we
* still need to align mipmaps correctly in texture memory!
*/
if ((tObj->MinFilter == GL_NEAREST) || (tObj->MinFilter == GL_LINEAR)) {
/* no mipmaps! */
struct gl_texture_image *texImage = tObj->Image[0][minl];
tfxMipMapLevel *mml = FX_MIPMAP_DATA(texImage);
GLint _w, _h, maxSize = 1 << fxMesa->textureMaxLod;
if ((mml->width > maxSize) || (mml->height > maxSize)) {
/* need to rescale */
GLint texelBytes = texImage->TexFormat->TexelBytes;
GLvoid *texImage_Data = texImage->Data;
_w = MIN2(texImage->Width, maxSize);
_h = MIN2(texImage->Height, maxSize);
if (TDFX_DEBUG & VERBOSE_TEXTURE) {
fprintf(stderr, "fxTexValidate: rescaling %d x %d -> %d x %d\n",
texImage->Width, texImage->Height, _w, _h);
}
/* we should leave these as is and... (!) */
texImage->Width = _w;
texImage->Height = _h;
fxTexGetInfo(_w, _h, NULL, NULL, NULL, NULL,
&(mml->wScale), &(mml->hScale));
_w *= mml->wScale;
_h *= mml->hScale;
texImage->Data = MESA_PBUFFER_ALLOC(_w * _h * texelBytes);
_mesa_rescale_teximage2d(texelBytes,
_w * texelBytes, /* dst stride */
mml->width, mml->height, /* src */
_w, _h, /* dst */
texImage_Data /*src*/, texImage->Data /*dst*/ );
MESA_PBUFFER_FREE(texImage_Data);
mml->width = _w;
mml->height = _h;
/* (!) ... and set mml->wScale = _w / texImage->Width */
}
} else {
/* mipmapping */
if (maxl - minl > fxMesa->textureMaxLod) {
/* skip a certain number of LODs */
minl += maxl - fxMesa->textureMaxLod;
if (TDFX_DEBUG & VERBOSE_TEXTURE) {
fprintf(stderr, "fxTexValidate: skipping %d LODs\n", minl - ti->minLevel);
}
ti->minLevel = tObj->BaseLevel = minl;
}
}
}
#endif
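The rewritten block above boils down to one decision: a texture without mipmaps gets its base image rescaled in place to the largest size the hardware can address, while a mipmapped texture instead skips the oversized top LODs by raising the base level. A sketch of just that decision on plain numbers (the actual pixel rescaling, Mesa texture objects and the wScale/hScale bookkeeping are omitted):

    /* Sketch of the rescale-vs-skip decision, with example sizes.
     * maxLod = 8 models hardware limited to 256x256 textures. */
    #include <stdio.h>

    #define MIN2(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        int maxLod = 8;                   /* HW limit: 1 << 8 = 256 texels */
        int maxSize = 1 << maxLod;
        int w = 1024, h = 512;            /* an oversized, non-mipmapped texture */
        int minl = 0, maxl = 10;          /* a full 1024x1024 mipmap chain */

        /* no mipmaps: rescale the single level in place */
        if (w > maxSize || h > maxSize)
            printf("rescaling %dx%d -> %dx%d\n",
                   w, h, MIN2(w, maxSize), MIN2(h, maxSize));

        /* mipmapped: push the base level past the LODs that don't fit */
        if (maxl - minl > maxLod) {
            int skipped = maxl - maxLod;  /* here: LODs 0 (1024) and 1 (512) */
            minl += skipped;
            printf("skipping %d LODs, new base level = %d\n", skipped, minl);
        }
        return 0;
    }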

View File

@ -481,9 +481,8 @@ void fxBuildVertices( GLcontext *ctx, GLuint start, GLuint count,
if (newinputs & VERT_BIT_COLOR0)
ind |= SETUP_RGBA;
if (newinputs & VERT_BIT_COLOR1) {
if (newinputs & VERT_BIT_COLOR1)
ind |= SETUP_SPEC;
}
if (newinputs & VERT_BIT_TEX0)
ind |= SETUP_TMU0;
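This last hunk only drops a redundant pair of braces; for context, the surrounding code turns freshly changed ("new") vertex inputs into a bitmask of setup steps. A tiny sketch of that accumulation with stand-in bit values:

    /* Sketch of the dirty-input -> setup-flag accumulation.  The bit
     * values are made up; only the pattern matches the code above. */
    #include <stdio.h>

    #define VERT_BIT_COLOR0 0x1
    #define VERT_BIT_COLOR1 0x2
    #define VERT_BIT_TEX0   0x4

    #define SETUP_RGBA 0x10
    #define SETUP_SPEC 0x20
    #define SETUP_TMU0 0x40

    int main(void)
    {
        unsigned newinputs = VERT_BIT_COLOR0 | VERT_BIT_TEX0;  /* example dirty set */
        unsigned ind = 0;

        if (newinputs & VERT_BIT_COLOR0)
            ind |= SETUP_RGBA;
        if (newinputs & VERT_BIT_COLOR1)
            ind |= SETUP_SPEC;
        if (newinputs & VERT_BIT_TEX0)
            ind |= SETUP_TMU0;

        printf("ind = 0x%X\n", ind);      /* 0x50: RGBA + TMU0 */
        return 0;
    }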