radeon: Add some debug output to texture function.

This commit is contained in:
Pauli Nieminen 2010-02-06 13:13:18 +02:00
parent e6f2819e7a
commit cdb0a51e8a
1 changed file with 85 additions and 27 deletions

View File

@ -33,6 +33,7 @@
#include "main/imports.h"
#include "main/context.h"
#include "main/convolve.h"
#include "main/enums.h"
#include "main/mipmap.h"
#include "main/texcompress.h"
#include "main/texstore.h"
@ -53,6 +54,13 @@ void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
assert(rowsize <= dststride);
assert(rowsize <= srcstride);
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s dst %p, stride %u, src %p, stride %u, "
"numrows %u, rowsize %u.\n",
__func__, dst, dststride,
src, srcstride,
numrows, rowsize);
if (rowsize == srcstride && rowsize == dststride) {
memcpy(dst, src, numrows*rowsize);
} else {
@ -102,8 +110,12 @@ static void teximage_set_map_data(radeon_texture_image *image)
{
radeon_mipmap_level *lvl;
if (!image->mt)
if (!image->mt) {
radeon_warning("%s(%p) Trying to set map data without miptree.\n",
__func__, image);
return;
}
lvl = &image->mt->levels[image->mtlevel];
@ -162,15 +174,31 @@ void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
radeonTexObj* t = radeon_tex_obj(texObj);
int face, level;
if (!radeon_validate_texture_miptree(ctx, texObj))
return;
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
"%s(%p, tex %p)\n",
__func__, ctx, texObj);
if (!radeon_validate_texture_miptree(ctx, texObj)) {
radeon_error("%s(%p, tex %p) Failed to validate miptree for "
"sw fallback.\n",
__func__, ctx, texObj);
return;
}
if (t->image_override && t->bo) {
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
"%s(%p, tex %p) Work around for missing miptree in r100.\n",
__func__, ctx, texObj);
map_override(ctx, t);
}
/* for r100 3D sw fallbacks don't have mt */
if (t->image_override && t->bo)
map_override(ctx, t);
if (!t->mt)
if (!t->mt) {
radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
__func__, ctx, texObj);
return;
}
radeon_bo_map(t->mt->bo, GL_FALSE);
for(face = 0; face < t->mt->faces; ++face) {
@ -184,6 +212,10 @@ void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
radeonTexObj* t = radeon_tex_obj(texObj);
int face, level;
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
"%s(%p, tex %p)\n",
__func__, ctx, texObj);
if (t->image_override && t->bo)
unmap_override(ctx, t);
/* for r100 3D sw fallbacks don't have mt */
@ -210,6 +242,10 @@ static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
int i, face;
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
"%s(%p, tex %p) Target type %s.\n",
__func__, ctx, texObj,
_mesa_lookup_enum_by_nr(target));
_mesa_generate_mipmap(ctx, target, texObj);
@ -240,6 +276,10 @@ void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_objec
bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;
if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s(%p, tex %p) Trying to generate mipmap for texture "
"in processing by GPU.\n",
__func__, ctx, texObj);
radeon_firevertices(rmesa);
}
@ -304,12 +344,14 @@ gl_format radeonChooseTextureFormat(GLcontext * ctx,
(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
(void)format;
#if 0
fprintf(stderr, "InternalFormat=%s(%d) type=%s format=%s\n",
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s InternalFormat=%s(%d) type=%s format=%s\n",
__func__,
_mesa_lookup_enum_by_nr(internalFormat), internalFormat,
_mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
fprintf(stderr, "do32bpt=%d force16bpt=%d\n", do32bpt, force16bpt);
#endif
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s do32bpt=%d force16bpt=%d\n",
__func__, do32bpt, force16bpt);
switch (internalFormat) {
case 4:
@ -558,11 +600,10 @@ static void teximage_assign_miptree(radeonContextPtr rmesa,
if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
radeon_miptree_unreference(&t->mt);
radeon_try_alloc_miptree(rmesa, t);
if (RADEON_DEBUG & RADEON_TEXTURE) {
fprintf(stderr, "%s: texObj %p, texImage %p, face %d, level %d, "
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s: texObj %p, texImage %p, face %d, level %d, "
"texObj miptree doesn't match, allocated new miptree %p\n",
__FUNCTION__, texObj, texImage, face, level, t->mt);
}
}
/* Miptree alocation may have failed,
@ -571,7 +612,9 @@ static void teximage_assign_miptree(radeonContextPtr rmesa,
image->mtface = face;
image->mtlevel = level;
radeon_miptree_reference(t->mt, &image->mt);
}
} else
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
"%s Failed to allocate miptree.\n", __func__);
}
static GLuint * allocate_image_offsets(GLcontext *ctx,
@ -615,6 +658,10 @@ static void radeon_store_teximage(GLcontext* ctx, int dims,
GLuint dstRowStride;
GLuint *dstImageOffsets;
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s(%p, tex %p, image %p) compressed %d\n",
__func__, ctx, texObj, texImage, compressed);
if (image->mt) {
dstRowStride = image->mt->levels[image->mtlevel].rowstride;
} else if (t->bo) {
@ -631,6 +678,7 @@ static void radeon_store_teximage(GLcontext* ctx, int dims,
unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
if (!dstImageOffsets) {
radeon_warning("%s Failed to allocate dstImaeOffset.\n", __func__);
return;
}
} else {
@ -704,18 +752,21 @@ static void radeon_teximage(
GLint postConvHeight = height;
GLuint face = _mesa_tex_target_to_face(target);
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
__func__, dims, texObj, texImage, face, level);
{
struct radeon_bo *bo;
bo = !image->mt ? image->bo : image->mt->bo;
if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
"%s Calling teximage for texture that is "
"queued for GPU processing.\n",
__func__);
radeon_firevertices(rmesa);
}
}
if (RADEON_DEBUG & RADEON_TEXTURE) {
fprintf(stderr, "radeon_teximage%dd: texObj %p, texImage %p, face %d, level %d\n",
dims, texObj, texImage, face, level);
}
t->validated = GL_FALSE;
@ -747,11 +798,10 @@ static void radeon_teximage(
texImage->Height,
texImage->Depth);
texImage->Data = _mesa_alloc_texmemory(size);
if (RADEON_DEBUG & RADEON_TEXTURE) {
fprintf(stderr, "radeon_teximage%dd: texObj %p, texImage %p, "
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
"%s %dd: texObj %p, texImage %p, "
" no miptree assigned, using local memory %p\n",
dims, texObj, texImage, texImage->Data);
}
__func__, dims, texObj, texImage, texImage->Data);
}
}
@ -845,18 +895,22 @@ static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int leve
radeonTexObj* t = radeon_tex_obj(texObj);
radeon_texture_image* image = get_radeon_texture_image(texImage);
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
__func__, dims, texObj, texImage,
_mesa_tex_target_to_face(target), level);
{
struct radeon_bo *bo;
bo = !image->mt ? image->bo : image->mt->bo;
if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
"%s Calling texsubimage for texture that is "
"queued for GPU processing.\n",
__func__);
radeon_firevertices(rmesa);
}
}
if (RADEON_DEBUG & RADEON_TEXTURE) {
fprintf(stderr, "radeon_texsubimage%dd: texObj %p, texImage %p, face %d, level %d\n",
dims, texObj, texImage, _mesa_tex_target_to_face(target), level);
}
t->validated = GL_FALSE;
if (compressed) {
@ -945,6 +999,10 @@ radeon_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
{
radeon_texture_image *image = get_radeon_texture_image(texImage);
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s(%p, tex %p, image %p) compressed %d.\n",
__func__, ctx, texObj, image, compressed);
if (image->mt) {
/* Map the texture image read-only */
radeon_teximage_map(image, GL_FALSE);