classic/r100: Delete driver

This is now only going to be available in the Amber branch

Reviewed-by: Emma Anholt <emma@anholt.net>
Acked-by: Jason Ekstrand <jason@jlekstrand.net>
Acked-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Adam Jackson <ajax@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10153>
Author: Dylan Baker, 2021-04-09 09:48:36 -07:00 (committed by Marge Bot)
parent 76791db088
commit 4d45b280bf
60 changed files with 6 additions and 21038 deletions

@@ -825,7 +825,7 @@ fedora-release:
-Wno-error=uninitialized
CPP_ARGS: >
-Wno-error=array-bounds
DRI_DRIVERS: "nouveau,i915,i965,r100,r200"
DRI_DRIVERS: "nouveau,i915,i965,r200"
DRI_LOADERS: >
-D glx=dri
-D gbm=enabled
@@ -1118,7 +1118,7 @@ debian-i386:
CROSS: i386
VULKAN_DRIVERS: intel,amd,swrast,virtio-experimental
GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,swrast,virgl,zink,crocus"
DRI_DRIVERS: "i915,i965,r100,r200,nouveau"
DRI_DRIVERS: "i915,i965,r200,nouveau"
EXTRA_OPTION: >
-D vulkan-layers=device-select,overlay

@@ -1,23 +0,0 @@
CHIPSET(0x4C57, RADEON_LW, RV200)
CHIPSET(0x4C58, RADEON_LX, RV200)
CHIPSET(0x4C59, RADEON_LY, RV100)
CHIPSET(0x4C5A, RADEON_LZ, RV100)
CHIPSET(0x5144, RADEON_QD, R100)
CHIPSET(0x5145, RADEON_QE, R100)
CHIPSET(0x5146, RADEON_QF, R100)
CHIPSET(0x5147, RADEON_QG, R100)
CHIPSET(0x5159, RADEON_QY, RV100)
CHIPSET(0x515A, RADEON_QZ, RV100)
CHIPSET(0x5157, RV200_QW, RV200)
CHIPSET(0x5158, RV200_QX, RV200)
CHIPSET(0x515E, RN50_515E, UNKNOWN)
CHIPSET(0x5969, RN50_5969, UNKNOWN)
CHIPSET(0x4136, RS100_4136, RS100)
CHIPSET(0x4336, RS100_4336, RS100)
CHIPSET(0x4137, RS200_4137, RS200)
CHIPSET(0x4337, RS200_4337, RS200)
CHIPSET(0x4237, RS250_4237, RS200)
CHIPSET(0x4437, RS250_4437, RS200)
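This header is a classic X-macro table: it contains no declarations itself, and each consumer defines CHIPSET() to pick out the columns it needs before including the file. Both consumers appear later in this commit (the loader's chip-ID array and the PCI_CHIP_* enum in radeon_chipset.h); a minimal sketch of the pattern:

/* Sketch of the X-macro pattern used by this header; the two
 * expansions mirror the loader and radeon_chipset.h hunks below. */
static const int radeon_chip_ids[] = {
#define CHIPSET(chip, name, family) chip,   /* keep only the PCI ID column */
#include "pci_ids/radeon_pci_ids.h"
#undef CHIPSET
};

enum {
#define CHIPSET(chip, name, family) PCI_CHIP_##name = chip, /* named ID constants */
#include "pci_ids/radeon_pci_ids.h"
#undef CHIPSET
};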

@@ -178,11 +178,11 @@ if dri_drivers.contains('auto')
if system_has_kms_drm
# TODO: PPC, Sparc
if ['x86', 'x86_64'].contains(host_machine.cpu_family())
-dri_drivers = ['i915', 'i965', 'r100', 'r200', 'nouveau']
+dri_drivers = ['i915', 'i965', 'r200', 'nouveau']
elif ['arm', 'aarch64'].contains(host_machine.cpu_family())
dri_drivers = []
elif ['mips', 'mips64', 'riscv32', 'riscv64'].contains(host_machine.cpu_family())
-dri_drivers = ['r100', 'r200', 'nouveau']
+dri_drivers = ['r200', 'nouveau']
else
error('Unknown architecture @0@. Please pass -Ddri-drivers to set driver options. Patches gladly accepted to fix this.'.format(
host_machine.cpu_family()))
@@ -198,7 +198,6 @@ endif
with_dri_i915 = dri_drivers.contains('i915')
with_dri_i965 = dri_drivers.contains('i965')
-with_dri_r100 = dri_drivers.contains('r100')
with_dri_r200 = dri_drivers.contains('r200')
with_dri_nouveau = dri_drivers.contains('nouveau')
@@ -1598,7 +1597,7 @@ _drm_ver = '2.4.109'
_libdrm_checks = [
['intel', with_dri_i915 or with_gallium_i915],
['amdgpu', (with_amd_vk and not with_platform_windows) or with_gallium_radeonsi],
-['radeon', (with_gallium_radeonsi or with_dri_r100 or with_dri_r200 or
+['radeon', (with_gallium_radeonsi or with_dri_r200 or
with_gallium_r300 or with_gallium_r600)],
['nouveau', (with_gallium_nouveau or with_dri_nouveau)],
]
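Note: with the auto list trimmed, anyone who still wants a specific set of classic drivers selects them explicitly at configure time, e.g. meson setup builddir -Ddri-drivers=i915,i965,r200,nouveau; the valid names are the choices listed in the meson_options.txt hunk below.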

@@ -55,7 +55,7 @@ option(
'dri-drivers',
type : 'array',
value : ['auto'],
-choices : ['auto', 'i915', 'i965', 'r100', 'r200', 'nouveau'],
+choices : ['auto', 'i915', 'i965', 'r200', 'nouveau'],
description : 'List of dri drivers to build. If this is set to auto all drivers applicable to the target OS/architecture will be built'
)
option(

@@ -32,12 +32,6 @@ static const int crocus_chip_ids[] = {
#undef CHIPSET
};
-static const int r100_chip_ids[] = {
-#define CHIPSET(chip, name, family) chip,
-#include "pci_ids/radeon_pci_ids.h"
-#undef CHIPSET
-};
static const int r200_chip_ids[] = {
#define CHIPSET(chip, name, family) chip,
#include "pci_ids/r200_pci_ids.h"
@@ -83,7 +77,6 @@ static const struct {
{ 0x8086, "i965", i965_chip_ids, ARRAY_SIZE(i965_chip_ids) },
{ 0x8086, "crocus", crocus_chip_ids, ARRAY_SIZE(crocus_chip_ids) },
{ 0x8086, "iris", NULL, -1, is_kernel_i915 },
{ 0x1002, "radeon", r100_chip_ids, ARRAY_SIZE(r100_chip_ids) },
{ 0x1002, "r200", r200_chip_ids, ARRAY_SIZE(r200_chip_ids) },
{ 0x1002, "r300", r300_chip_ids, ARRAY_SIZE(r300_chip_ids) },
{ 0x1002, "r600", r600_chip_ids, ARRAY_SIZE(r600_chip_ids) },

@@ -33,11 +33,6 @@ if with_dri_i965
_dri_drivers += libi965
_dri_link += 'i965_dri.so'
endif
-if with_dri_r100
-subdir('radeon')
-_dri_drivers += libr100
-_dri_link += 'radeon_dri.so'
-endif
if with_dri_r200
subdir('r200')
_dri_drivers += libr200

@@ -1,84 +0,0 @@
# Copyright © 2017 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
files_r100 = files(
'radeon_buffer_objects.c',
'radeon_buffer_objects.h',
'radeon_cmdbuf.h',
'radeon_common.c',
'radeon_common_context.c',
'radeon_common_context.h',
'radeon_common.h',
'radeon_debug.c',
'radeon_debug.h',
'radeon_dma.c',
'radeon_dma.h',
'radeon_fbo.c',
'radeon_fog.c',
'radeon_fog.h',
'radeon_mipmap_tree.c',
'radeon_mipmap_tree.h',
'radeon_pixel_read.c',
'radeon_queryobj.c',
'radeon_queryobj.h',
'radeon_span.c',
'radeon_span.h',
'radeon_tex_copy.c',
'radeon_texture.c',
'radeon_texture.h',
'radeon_tile.c',
'radeon_tile.h',
'radeon_blit.c',
'radeon_blit.h',
'radeon_context.c',
'radeon_context.h',
'radeon_chipset.h',
'radeon_ioctl.c',
'radeon_ioctl.h',
'radeon_maos.c',
'radeon_maos.h',
'radeon_maos_vbtmp.h',
'radeon_sanity.c',
'radeon_sanity.h',
'radeon_screen.c',
'radeon_screen.h',
'radeon_state.c',
'radeon_state.h',
'radeon_state_init.c',
'radeon_swtcl.c',
'radeon_swtcl.h',
'radeon_tcl.c',
'radeon_tcl.h',
'radeon_tex.c',
'radeon_tex.h',
'radeon_texstate.c',
'server/radeon_reg.h',
)
libr100 = static_library(
'r100',
files_r100,
include_directories : [
inc_include, inc_src, inc_mapi, inc_mesa, inc_gallium, inc_gallium_aux, inc_dri_common, inc_util, include_directories('server'),
],
c_args : ['-DRADEON_R100'],
gnu_symbol_visibility : 'hidden',
dependencies : [dep_libdrm, dep_libdrm_radeon, idep_mesautil],
)

@@ -1,420 +0,0 @@
/*
* Copyright (C) 2010 Advanced Micro Devices, Inc.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "radeon_common.h"
#include "radeon_context.h"
#include "radeon_blit.h"
#include "radeon_tex.h"
static inline uint32_t cmdpacket0(struct radeon_screen *rscrn,
int reg, int count)
{
if (count)
return CP_PACKET0(reg, count - 1);
return CP_PACKET2;
}
/* common formats supported as both textures and render targets */
unsigned r100_check_blit(mesa_format mesa_format, uint32_t dst_pitch)
{
/* XXX others? */
switch (mesa_format) {
#if UTIL_ARCH_LITTLE_ENDIAN
case MESA_FORMAT_B8G8R8A8_UNORM:
case MESA_FORMAT_B8G8R8X8_UNORM:
case MESA_FORMAT_B5G6R5_UNORM:
case MESA_FORMAT_B4G4R4A4_UNORM:
case MESA_FORMAT_B5G5R5A1_UNORM:
#else
case MESA_FORMAT_A8R8G8B8_UNORM:
case MESA_FORMAT_X8R8G8B8_UNORM:
case MESA_FORMAT_R5G6B5_UNORM:
case MESA_FORMAT_A4R4G4B4_UNORM:
case MESA_FORMAT_A1R5G5B5_UNORM:
#endif
case MESA_FORMAT_A_UNORM8:
case MESA_FORMAT_L_UNORM8:
case MESA_FORMAT_I_UNORM8:
break;
default:
return 0;
}
/* Rendering to small buffer doesn't work.
* Looks like a hw limitation.
*/
if (dst_pitch < 32)
return 0;
/* ??? */
if (_mesa_get_format_bits(mesa_format, GL_DEPTH_BITS) > 0)
return 0;
return 1;
}
static inline void emit_vtx_state(struct r100_context *r100)
{
BATCH_LOCALS(&r100->radeon);
BEGIN_BATCH(8);
if (r100->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
OUT_BATCH_REGVAL(RADEON_SE_CNTL_STATUS, 0);
} else {
OUT_BATCH_REGVAL(RADEON_SE_CNTL_STATUS, RADEON_TCL_BYPASS);
}
OUT_BATCH_REGVAL(RADEON_SE_COORD_FMT, (RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
RADEON_TEX1_W_ROUTING_USE_W0));
OUT_BATCH_REGVAL(RADEON_SE_VTX_FMT, RADEON_SE_VTX_FMT_XY | RADEON_SE_VTX_FMT_ST0);
OUT_BATCH_REGVAL(RADEON_SE_CNTL, (RADEON_DIFFUSE_SHADE_GOURAUD |
RADEON_BFACE_SOLID |
RADEON_FFACE_SOLID |
RADEON_VTX_PIX_CENTER_OGL |
RADEON_ROUND_MODE_ROUND |
RADEON_ROUND_PREC_4TH_PIX));
END_BATCH();
}
static void inline emit_tx_setup(struct r100_context *r100,
mesa_format mesa_format,
struct radeon_bo *bo,
intptr_t offset,
unsigned width,
unsigned height,
unsigned pitch)
{
uint32_t txformat = RADEON_TXFORMAT_NON_POWER2;
BATCH_LOCALS(&r100->radeon);
assert(width <= 2048);
assert(height <= 2048);
assert(offset % 32 == 0);
txformat |= tx_table[mesa_format].format;
if (bo->flags & RADEON_BO_FLAGS_MACRO_TILE)
offset |= RADEON_TXO_MACRO_TILE;
if (bo->flags & RADEON_BO_FLAGS_MICRO_TILE)
offset |= RADEON_TXO_MICRO_TILE_X2;
BEGIN_BATCH(18);
OUT_BATCH_REGVAL(RADEON_PP_CNTL, RADEON_TEX_0_ENABLE | RADEON_TEX_BLEND_0_ENABLE);
OUT_BATCH_REGVAL(RADEON_PP_TXCBLEND_0, (RADEON_COLOR_ARG_A_ZERO |
RADEON_COLOR_ARG_B_ZERO |
RADEON_COLOR_ARG_C_T0_COLOR |
RADEON_BLEND_CTL_ADD |
RADEON_CLAMP_TX));
OUT_BATCH_REGVAL(RADEON_PP_TXABLEND_0, (RADEON_ALPHA_ARG_A_ZERO |
RADEON_ALPHA_ARG_B_ZERO |
RADEON_ALPHA_ARG_C_T0_ALPHA |
RADEON_BLEND_CTL_ADD |
RADEON_CLAMP_TX));
OUT_BATCH_REGVAL(RADEON_PP_TXFILTER_0, (RADEON_CLAMP_S_CLAMP_LAST |
RADEON_CLAMP_T_CLAMP_LAST |
RADEON_MAG_FILTER_NEAREST |
RADEON_MIN_FILTER_NEAREST));
OUT_BATCH_REGVAL(RADEON_PP_TXFORMAT_0, txformat);
OUT_BATCH_REGVAL(RADEON_PP_TEX_SIZE_0, ((width - 1) |
((height - 1) << RADEON_TEX_VSIZE_SHIFT)));
OUT_BATCH_REGVAL(RADEON_PP_TEX_PITCH_0, pitch * _mesa_get_format_bytes(mesa_format) - 32);
OUT_BATCH_REGSEQ(RADEON_PP_TXOFFSET_0, 1);
OUT_BATCH_RELOC(bo, offset, RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
END_BATCH();
}
static inline void emit_cb_setup(struct r100_context *r100,
struct radeon_bo *bo,
intptr_t offset,
mesa_format mesa_format,
unsigned pitch,
unsigned width,
unsigned height)
{
uint32_t dst_pitch = pitch;
uint32_t dst_format = 0;
BATCH_LOCALS(&r100->radeon);
/* XXX others? */
switch (mesa_format) {
/* The first of each pair is for little, the second for big endian. */
case MESA_FORMAT_B8G8R8A8_UNORM:
case MESA_FORMAT_A8R8G8B8_UNORM:
case MESA_FORMAT_B8G8R8X8_UNORM:
case MESA_FORMAT_X8R8G8B8_UNORM:
dst_format = RADEON_COLOR_FORMAT_ARGB8888;
break;
case MESA_FORMAT_B5G6R5_UNORM:
case MESA_FORMAT_R5G6B5_UNORM:
dst_format = RADEON_COLOR_FORMAT_RGB565;
break;
case MESA_FORMAT_B4G4R4A4_UNORM:
case MESA_FORMAT_A4R4G4B4_UNORM:
dst_format = RADEON_COLOR_FORMAT_ARGB4444;
break;
case MESA_FORMAT_B5G5R5A1_UNORM:
case MESA_FORMAT_A1R5G5B5_UNORM:
dst_format = RADEON_COLOR_FORMAT_ARGB1555;
break;
case MESA_FORMAT_A_UNORM8:
case MESA_FORMAT_L_UNORM8:
case MESA_FORMAT_I_UNORM8:
dst_format = RADEON_COLOR_FORMAT_RGB8;
break;
default:
break;
}
if (bo->flags & RADEON_BO_FLAGS_MACRO_TILE)
dst_pitch |= RADEON_COLOR_TILE_ENABLE;
if (bo->flags & RADEON_BO_FLAGS_MICRO_TILE)
dst_pitch |= RADEON_COLOR_MICROTILE_ENABLE;
BEGIN_BATCH(18);
OUT_BATCH_REGVAL(RADEON_RE_TOP_LEFT, 0);
OUT_BATCH_REGVAL(RADEON_RE_WIDTH_HEIGHT, (((width - 1) << RADEON_RE_WIDTH_SHIFT) |
((height - 1) << RADEON_RE_HEIGHT_SHIFT)));
OUT_BATCH_REGVAL(RADEON_RB3D_PLANEMASK, 0xffffffff);
OUT_BATCH_REGVAL(RADEON_RB3D_BLENDCNTL, RADEON_SRC_BLEND_GL_ONE | RADEON_DST_BLEND_GL_ZERO);
OUT_BATCH_REGVAL(RADEON_RB3D_CNTL, dst_format);
OUT_BATCH_REGSEQ(RADEON_RB3D_COLOROFFSET, 1);
OUT_BATCH_RELOC(bo, offset, 0, RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0);
OUT_BATCH_REGSEQ(RADEON_RB3D_COLORPITCH, 1);
OUT_BATCH_RELOC(bo, dst_pitch, 0, RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0);
END_BATCH();
}
static GLboolean validate_buffers(struct r100_context *r100,
struct radeon_bo *src_bo,
struct radeon_bo *dst_bo)
{
int ret;
radeon_cs_space_reset_bos(r100->radeon.cmdbuf.cs);
ret = radeon_cs_space_check_with_bo(r100->radeon.cmdbuf.cs,
src_bo, RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT, 0);
if (ret)
return GL_FALSE;
ret = radeon_cs_space_check_with_bo(r100->radeon.cmdbuf.cs,
dst_bo, 0, RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT);
if (ret)
return GL_FALSE;
return GL_TRUE;
}
/**
* Calculate texcoords for given image region.
* Output values are [minx, maxx, miny, maxy]
*/
static inline void calc_tex_coords(float img_width, float img_height,
float x, float y,
float reg_width, float reg_height,
unsigned flip_y, float *buf)
{
buf[0] = x / img_width;
buf[1] = buf[0] + reg_width / img_width;
buf[2] = y / img_height;
buf[3] = buf[2] + reg_height / img_height;
if (flip_y)
{
buf[2] = 1.0 - buf[2];
buf[3] = 1.0 - buf[3];
}
}
static inline void emit_draw_packet(struct r100_context *r100,
unsigned src_width, unsigned src_height,
unsigned src_x_offset, unsigned src_y_offset,
unsigned dst_x_offset, unsigned dst_y_offset,
unsigned reg_width, unsigned reg_height,
unsigned flip_y)
{
float texcoords[4];
float verts[12];
BATCH_LOCALS(&r100->radeon);
calc_tex_coords(src_width, src_height,
src_x_offset, src_y_offset,
reg_width, reg_height,
flip_y, texcoords);
verts[0] = dst_x_offset;
verts[1] = dst_y_offset + reg_height;
verts[2] = texcoords[0];
verts[3] = texcoords[3];
verts[4] = dst_x_offset + reg_width;
verts[5] = dst_y_offset + reg_height;
verts[6] = texcoords[1];
verts[7] = texcoords[3];
verts[8] = dst_x_offset + reg_width;
verts[9] = dst_y_offset;
verts[10] = texcoords[1];
verts[11] = texcoords[2];
BEGIN_BATCH(15);
OUT_BATCH(RADEON_CP_PACKET3_3D_DRAW_IMMD | (13 << 16));
OUT_BATCH(RADEON_CP_VC_FRMT_XY | RADEON_CP_VC_FRMT_ST0);
OUT_BATCH(RADEON_CP_VC_CNTL_PRIM_WALK_RING |
RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST |
RADEON_CP_VC_CNTL_MAOS_ENABLE |
RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
(3 << 16));
OUT_BATCH_TABLE(verts, 12);
END_BATCH();
}
/**
* Copy a region of [@a width x @a height] pixels from source buffer
* to destination buffer.
* @param[in] r100 r100 context
* @param[in] src_bo source radeon buffer object
* @param[in] src_offset offset of the source image in the @a src_bo
* @param[in] src_mesaformat source image format
* @param[in] src_pitch aligned source image width
* @param[in] src_width source image width
* @param[in] src_height source image height
* @param[in] src_x_offset x offset in the source image
* @param[in] src_y_offset y offset in the source image
* @param[in] dst_bo destination radeon buffer object
* @param[in] dst_offset offset of the destination image in the @a dst_bo
* @param[in] dst_mesaformat destination image format
* @param[in] dst_pitch aligned destination image width
* @param[in] dst_width destination image width
* @param[in] dst_height destination image height
* @param[in] dst_x_offset x offset in the destination image
* @param[in] dst_y_offset y offset in the destination image
* @param[in] width region width
* @param[in] height region height
* @param[in] flip_y set if y coords of the source image need to be flipped
*/
unsigned r100_blit(struct gl_context *ctx,
struct radeon_bo *src_bo,
intptr_t src_offset,
mesa_format src_mesaformat,
unsigned src_pitch,
unsigned src_width,
unsigned src_height,
unsigned src_x_offset,
unsigned src_y_offset,
struct radeon_bo *dst_bo,
intptr_t dst_offset,
mesa_format dst_mesaformat,
unsigned dst_pitch,
unsigned dst_width,
unsigned dst_height,
unsigned dst_x_offset,
unsigned dst_y_offset,
unsigned reg_width,
unsigned reg_height,
unsigned flip_y)
{
struct r100_context *r100 = R100_CONTEXT(ctx);
if (!r100_check_blit(dst_mesaformat, dst_pitch))
return GL_FALSE;
/* Make sure that colorbuffer has even width - hw limitation */
if (dst_pitch % 2 > 0)
++dst_pitch;
/* Need to clamp the region size to make sure
* we don't read outside of the source buffer
* or write outside of the destination buffer.
*/
if (reg_width + src_x_offset > src_width)
reg_width = src_width - src_x_offset;
if (reg_height + src_y_offset > src_height)
reg_height = src_height - src_y_offset;
if (reg_width + dst_x_offset > dst_width)
reg_width = dst_width - dst_x_offset;
if (reg_height + dst_y_offset > dst_height)
reg_height = dst_height - dst_y_offset;
if (src_bo == dst_bo) {
return GL_FALSE;
}
if (src_offset % 32 || dst_offset % 32) {
return GL_FALSE;
}
if (0) {
fprintf(stderr, "src: size [%d x %d], pitch %d, offset %zd "
"offset [%d x %d], format %s, bo %p\n",
src_width, src_height, src_pitch, src_offset,
src_x_offset, src_y_offset,
_mesa_get_format_name(src_mesaformat),
src_bo);
fprintf(stderr, "dst: pitch %d offset %zd, offset[%d x %d], format %s, bo %p\n",
dst_pitch, dst_offset, dst_x_offset, dst_y_offset,
_mesa_get_format_name(dst_mesaformat), dst_bo);
fprintf(stderr, "region: %d x %d\n", reg_width, reg_height);
}
/* Flush is needed to make sure that source buffer has correct data */
radeonFlush(ctx, 0);
rcommonEnsureCmdBufSpace(&r100->radeon, 59, __func__);
if (!validate_buffers(r100, src_bo, dst_bo))
return GL_FALSE;
/* 8 */
emit_vtx_state(r100);
/* 18 */
emit_tx_setup(r100, src_mesaformat, src_bo, src_offset, src_width, src_height, src_pitch);
/* 18 */
emit_cb_setup(r100, dst_bo, dst_offset, dst_mesaformat, dst_pitch, dst_width, dst_height);
/* 15 */
emit_draw_packet(r100, src_width, src_height,
src_x_offset, src_y_offset,
dst_x_offset, dst_y_offset,
reg_width, reg_height,
flip_y);
radeonFlush(ctx, 0);
/* We submitted those packets outside our state atom mechanism. Thus
* make sure they are all resubmitted the next time. */
r100->hw.ctx.dirty = GL_TRUE;
r100->hw.msk.dirty = GL_TRUE;
r100->hw.set.dirty = GL_TRUE;
r100->hw.tex[0].dirty = GL_TRUE;
r100->hw.txr[0].dirty = GL_TRUE;
return GL_TRUE;
}

@@ -1,56 +0,0 @@
/*
* Copyright (C) 2010 Advanced Micro Devices, Inc.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef RADEON_BLIT_H
#define RADEON_BLIT_H
void r100_blit_init(struct r100_context *r100);
unsigned r100_check_blit(mesa_format mesa_format, uint32_t dst_pitch);
unsigned r100_blit(struct gl_context *ctx,
struct radeon_bo *src_bo,
intptr_t src_offset,
mesa_format src_mesaformat,
unsigned src_pitch,
unsigned src_width,
unsigned src_height,
unsigned src_x_offset,
unsigned src_y_offset,
struct radeon_bo *dst_bo,
intptr_t dst_offset,
mesa_format dst_mesaformat,
unsigned dst_pitch,
unsigned dst_width,
unsigned dst_height,
unsigned dst_x_offset,
unsigned dst_y_offset,
unsigned width,
unsigned height,
unsigned flip_y);
#endif // RADEON_BLIT_H

@@ -1,238 +0,0 @@
/*
* Copyright 2009 Maciej Cencora <m.cencora@gmail.com>
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "main/mtypes.h"
#include "main/bufferobj.h"
#include "util/u_memory.h"
#include "radeon_common.h"
#include "radeon_buffer_objects.h"
struct radeon_buffer_object *
get_radeon_buffer_object(struct gl_buffer_object *obj)
{
return (struct radeon_buffer_object *) obj;
}
static struct gl_buffer_object *
radeonNewBufferObject(struct gl_context * ctx,
GLuint name)
{
struct radeon_buffer_object *obj = CALLOC_STRUCT(radeon_buffer_object);
_mesa_initialize_buffer_object(ctx, &obj->Base, name);
obj->bo = NULL;
return &obj->Base;
}
/**
* Called via glDeleteBuffersARB().
*/
static void
radeonDeleteBufferObject(struct gl_context * ctx,
struct gl_buffer_object *obj)
{
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
int i;
for (i = 0; i < MAP_COUNT; i++) {
if (obj->Mappings[i].Pointer) {
radeon_bo_unmap(radeon_obj->bo);
}
}
if (radeon_obj->bo) {
radeon_bo_unref(radeon_obj->bo);
}
_mesa_delete_buffer_object(ctx, obj);
}
/**
* Allocate space for and store data in a buffer object. Any data that was
* previously stored in the buffer object is lost. If data is NULL,
* memory will be allocated, but no copy will occur.
* Called via ctx->Driver.BufferData().
* \return GL_TRUE for success, GL_FALSE if out of memory
*/
static GLboolean
radeonBufferData(struct gl_context * ctx,
GLenum target,
GLsizeiptrARB size,
const GLvoid * data,
GLenum usage,
GLbitfield storageFlags,
struct gl_buffer_object *obj)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
radeon_obj->Base.Size = size;
radeon_obj->Base.Usage = usage;
radeon_obj->Base.StorageFlags = storageFlags;
if (radeon_obj->bo != NULL) {
radeon_bo_unref(radeon_obj->bo);
radeon_obj->bo = NULL;
}
if (size != 0) {
radeon_obj->bo = radeon_bo_open(radeon->radeonScreen->bom,
0,
size,
ctx->Const.MinMapBufferAlignment,
RADEON_GEM_DOMAIN_GTT,
0);
if (!radeon_obj->bo)
return GL_FALSE;
if (data != NULL) {
radeon_bo_map(radeon_obj->bo, GL_TRUE);
memcpy(radeon_obj->bo->ptr, data, size);
radeon_bo_unmap(radeon_obj->bo);
}
}
return GL_TRUE;
}
/**
* Replace data in a subrange of buffer object. If the data range
* specified by size + offset extends beyond the end of the buffer or
* if data is NULL, no copy is performed.
* Called via glBufferSubDataARB().
*/
static void
radeonBufferSubData(struct gl_context * ctx,
GLintptrARB offset,
GLsizeiptrARB size,
const GLvoid * data,
struct gl_buffer_object *obj)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
if (radeon_bo_is_referenced_by_cs(radeon_obj->bo, radeon->cmdbuf.cs)) {
radeon_firevertices(radeon);
}
radeon_bo_map(radeon_obj->bo, GL_TRUE);
memcpy(radeon_obj->bo->ptr + offset, data, size);
radeon_bo_unmap(radeon_obj->bo);
}
/**
* Called via glGetBufferSubDataARB()
*/
static void
radeonGetBufferSubData(struct gl_context * ctx,
GLintptrARB offset,
GLsizeiptrARB size,
GLvoid * data,
struct gl_buffer_object *obj)
{
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
radeon_bo_map(radeon_obj->bo, GL_FALSE);
memcpy(data, radeon_obj->bo->ptr + offset, size);
radeon_bo_unmap(radeon_obj->bo);
}
/**
* Called via glMapBuffer() and glMapBufferRange()
*/
static void *
radeonMapBufferRange(struct gl_context * ctx,
GLintptr offset, GLsizeiptr length,
GLbitfield access, struct gl_buffer_object *obj,
gl_map_buffer_index index)
{
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
const GLboolean write_only =
(access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_WRITE_BIT;
if (write_only) {
ctx->Driver.Flush(ctx, 0);
}
if (radeon_obj->bo == NULL) {
obj->Mappings[index].Pointer = NULL;
return NULL;
}
obj->Mappings[index].Offset = offset;
obj->Mappings[index].Length = length;
obj->Mappings[index].AccessFlags = access;
radeon_bo_map(radeon_obj->bo, write_only);
obj->Mappings[index].Pointer = radeon_obj->bo->ptr + offset;
return obj->Mappings[index].Pointer;
}
/**
* Called via glUnmapBufferARB()
*/
static GLboolean
radeonUnmapBuffer(struct gl_context * ctx,
struct gl_buffer_object *obj,
gl_map_buffer_index index)
{
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
if (radeon_obj->bo != NULL) {
radeon_bo_unmap(radeon_obj->bo);
}
obj->Mappings[index].Pointer = NULL;
obj->Mappings[index].Offset = 0;
obj->Mappings[index].Length = 0;
return GL_TRUE;
}
void
radeonInitBufferObjectFuncs(struct dd_function_table *functions)
{
functions->NewBufferObject = radeonNewBufferObject;
functions->DeleteBuffer = radeonDeleteBufferObject;
functions->BufferData = radeonBufferData;
functions->BufferSubData = radeonBufferSubData;
functions->GetBufferSubData = radeonGetBufferSubData;
functions->MapBufferRange = radeonMapBufferRange;
functions->UnmapBuffer = radeonUnmapBuffer;
}

@@ -1,52 +0,0 @@
/*
* Copyright 2009 Maciej Cencora <m.cencora@gmail.com>
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef RADEON_BUFFER_OBJECTS_H
#define RADEON_BUFFER_OBJECTS_H
#include "main/mtypes.h"
struct radeon_bo;
/**
* Radeon vertex/pixel buffer object, derived from Mesa's gl_buffer_object.
*/
struct radeon_buffer_object
{
struct gl_buffer_object Base;
struct radeon_bo *bo;
};
struct radeon_buffer_object *
get_radeon_buffer_object(struct gl_buffer_object *obj);
/**
* Hook the bufferobject implementation into mesa:
*/
void radeonInitBufferObjectFuncs(struct dd_function_table *functions);
#endif

@@ -1,41 +0,0 @@
#ifndef _RADEON_CHIPSET_H
#define _RADEON_CHIPSET_H
/* General chip classes:
* r100 includes R100, RV100, RV200, RS100, RS200, RS250.
* r200 includes R200, RV250, RV280, RS300.
* (RS* denotes IGP)
*/
enum {
#define CHIPSET(id, name, family) PCI_CHIP_##name = id,
#if defined(RADEON_R100)
#include "pci_ids/radeon_pci_ids.h"
#elif defined(RADEON_R200)
#include "pci_ids/r200_pci_ids.h"
#endif
#undef CHIPSET
};
enum {
#if defined(RADEON_R100)
CHIP_FAMILY_R100,
CHIP_FAMILY_RV100,
CHIP_FAMILY_RS100,
CHIP_FAMILY_RV200,
CHIP_FAMILY_RS200,
#elif defined(RADEON_R200)
CHIP_FAMILY_R200,
CHIP_FAMILY_RV250,
CHIP_FAMILY_RS300,
CHIP_FAMILY_RV280,
#endif
CHIP_FAMILY_LAST
};
#define RADEON_CHIPSET_TCL (1 << 0) /* tcl support - any radeon */
#define RADEON_CHIPSET_BROKEN_STENCIL (1 << 1) /* r100 stencil bug */
#define R200_CHIPSET_YCBCR_BROKEN (1 << 2) /* r200 ycbcr bug */
#define RADEON_CHIPSET_DEPTH_ALWAYS_TILED (1 << 3) /* M7 and R200s */
#endif /* _RADEON_CHIPSET_H */

@@ -1,107 +0,0 @@
#ifndef COMMON_CMDBUF_H
#define COMMON_CMDBUF_H
GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller);
int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller);
int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller);
void rcommonInitCmdBuf(radeonContextPtr rmesa);
void rcommonDestroyCmdBuf(radeonContextPtr rmesa);
void rcommonBeginBatch(radeonContextPtr rmesa,
int n,
const char *file,
const char *function,
int line);
/* +r6/r7 : code here moved */
#define CP_PACKET2 (2 << 30)
#define CP_PACKET0(reg, n) (RADEON_CP_PACKET0 | ((n)<<16) | ((reg)>>2))
#define CP_PACKET0_ONE(reg, n) (RADEON_CP_PACKET0 | RADEON_CP_PACKET0_ONE_REG_WR | ((n)<<16) | ((reg)>>2))
#define CP_PACKET3(pkt, n) (RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
/**
* Every function writing to the command buffer needs to declare this
* to get the necessary local variables.
*/
#define BATCH_LOCALS(rmesa) \
const radeonContextPtr b_l_rmesa = rmesa
/**
* Prepare writing n dwords to the command buffer. Does not cause automatic
* state emits.
*/
#define BEGIN_BATCH(n) rcommonBeginBatch(b_l_rmesa, n, __FILE__, __func__, __LINE__)
/**
* Write one dword to the command buffer.
*/
#define OUT_BATCH(data) \
do { \
radeon_cs_write_dword(b_l_rmesa->cmdbuf.cs, data);\
} while(0)
/**
* Write a relocated dword to the command buffer.
*/
#define OUT_BATCH_RELOC(bo, offset, rd, wd, flags) \
do { \
int __offset = (offset); \
if (0 && __offset) { \
fprintf(stderr, "(%s:%s:%d) offset : %d\n", \
__FILE__, __func__, __LINE__, __offset); \
} \
radeon_cs_write_dword(b_l_rmesa->cmdbuf.cs, __offset); \
radeon_cs_write_reloc(b_l_rmesa->cmdbuf.cs, \
bo, rd, wd, flags); \
} while(0)
/**
* Write n dwords from ptr to the command buffer.
*/
#define OUT_BATCH_TABLE(ptr,n) \
do { \
radeon_cs_write_table(b_l_rmesa->cmdbuf.cs, (ptr), (n));\
} while(0)
/**
* Finish writing dwords to the command buffer.
* The number of (direct or indirect) OUT_BATCH calls between the previous
* BEGIN_BATCH and END_BATCH must match the number specified at BEGIN_BATCH time.
*/
#define END_BATCH() \
do { \
radeon_cs_end(b_l_rmesa->cmdbuf.cs, __FILE__, __func__, __LINE__);\
} while(0)
/**
* After the last END_BATCH() of rendering, this indicates that flushing
* the command buffer now is okay.
*/
#define COMMIT_BATCH() \
do { \
} while(0)
/** Single register write to command buffer; requires 2 dwords. */
#define OUT_BATCH_REGVAL(reg, val) \
OUT_BATCH(cmdpacket0(b_l_rmesa->radeonScreen, (reg), 1)); \
OUT_BATCH((val))
/** Continuous register range write to command buffer; requires 1 dword,
* expects count dwords afterwards for register contents. */
#define OUT_BATCH_REGSEQ(reg, count) \
OUT_BATCH(cmdpacket0(b_l_rmesa->radeonScreen, (reg), (count)))
/* +r6/r7 : code here moved */
/* Fire the buffered vertices no matter what.
*/
static inline void radeon_firevertices(radeonContextPtr radeon)
{
if (radeon->cmdbuf.cs->cdw || radeon->dma.flush )
radeon->glCtx.Driver.Flush(&radeon->glCtx, 0); /* +r6/r7 */
}
#endif
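These macros impose a strict discipline: BATCH_LOCALS() declares the b_l_rmesa handle they all use, BEGIN_BATCH(n) must reserve exactly the number of dwords that are subsequently written, and END_BATCH() closes the run with a matching count. emit_vtx_state() in the radeon_blit.c hunk above is a full example; a reduced sketch:

/* Reduced, hypothetical example of the batch discipline, modeled on
 * emit_vtx_state() in radeon_blit.c: reserve the exact dword count,
 * emit the register writes, close the batch. */
static void emit_example_state(struct r100_context *r100)
{
   BATCH_LOCALS(&r100->radeon);   /* declares b_l_rmesa for the macros */

   BEGIN_BATCH(4);                /* two OUT_BATCH_REGVALs = 4 dwords */
   OUT_BATCH_REGVAL(RADEON_RE_TOP_LEFT, 0);
   OUT_BATCH_REGVAL(RADEON_RB3D_PLANEMASK, 0xffffffff);
   END_BATCH();
}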

@@ -1,726 +0,0 @@
/**************************************************************************
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keithw@vmware.com>
*/
/*
- Scissor implementation
- buffer swap/copy ioctls
- finish/flush
- state emission
- cmdbuffer management
*/
#include <errno.h>
#include "main/glheader.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "drivers/common/meta.h"
#include "radeon_common.h"
#include "radeon_drm.h"
#include "radeon_queryobj.h"
/**
* Enable verbose debug output for emit code.
* 0 no output
* 1 most output
* 2 also print state values
*/
#define RADEON_CMDBUF 0
/* =============================================================
* Scissoring
*/
/**
* Update cliprects and scissors.
*/
void radeonSetCliprects(radeonContextPtr radeon)
{
__DRIdrawable *const drawable = radeon_get_drawable(radeon);
__DRIdrawable *const readable = radeon_get_readable(radeon);
if(drawable == NULL && readable == NULL)
return;
struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
if ((draw_rfb->base.Width != drawable->w) ||
(draw_rfb->base.Height != drawable->h)) {
_mesa_resize_framebuffer(&radeon->glCtx, &draw_rfb->base,
drawable->w, drawable->h);
}
if (drawable != readable) {
if ((read_rfb->base.Width != readable->w) ||
(read_rfb->base.Height != readable->h)) {
_mesa_resize_framebuffer(&radeon->glCtx, &read_rfb->base,
readable->w, readable->h);
}
}
if (radeon->state.scissor.enabled)
radeonUpdateScissor(&radeon->glCtx);
}
void radeonUpdateScissor( struct gl_context *ctx )
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
GLint x = ctx->Scissor.ScissorArray[0].X, y = ctx->Scissor.ScissorArray[0].Y;
GLsizei w = ctx->Scissor.ScissorArray[0].Width, h = ctx->Scissor.ScissorArray[0].Height;
int x1, y1, x2, y2;
int min_x, min_y, max_x, max_y;
if (!ctx->DrawBuffer)
return;
min_x = min_y = 0;
max_x = ctx->DrawBuffer->Width - 1;
max_y = ctx->DrawBuffer->Height - 1;
if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
x1 = x;
y1 = ctx->DrawBuffer->Height - (y + h);
x2 = x + w - 1;
y2 = y1 + h - 1;
} else {
x1 = x;
y1 = y;
x2 = x + w - 1;
y2 = y + h - 1;
}
rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);
if (rmesa->vtbl.update_scissor)
rmesa->vtbl.update_scissor(ctx);
}
/* =============================================================
* Scissoring
*/
void radeonScissor(struct gl_context *ctx)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
if (ctx->Scissor.EnableFlags) {
/* We don't pipeline cliprect changes */
radeon_firevertices(radeon);
radeonUpdateScissor(ctx);
}
}
/* ================================================================
* SwapBuffers with client-side throttling
*/
uint32_t radeonGetAge(radeonContextPtr radeon)
{
drm_radeon_getparam_t gp;
int ret;
uint32_t age;
gp.param = RADEON_PARAM_LAST_CLEAR;
gp.value = (int *)&age;
ret = drmCommandWriteRead(radeon->radeonScreen->driScreen->fd, DRM_RADEON_GETPARAM,
&gp, sizeof(gp));
if (ret) {
fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __func__,
ret);
exit(1);
}
return age;
}
void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
*rrbColor = NULL;
uint32_t offset = 0;
if (!fb) {
/* this can happen during the initial context initialization */
return;
}
/* radeons only handle 1 color draw so far */
if (fb->_NumColorDrawBuffers != 1) {
radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
return;
}
/* Do this here, not core Mesa, since this function is called from
* many places within the driver.
*/
if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
_mesa_update_framebuffer(ctx, ctx->ReadBuffer, ctx->DrawBuffer);
/* this updates the DrawBuffer's Width/Height if it's a FBO */
_mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
}
if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
/* this may occur when we're called by glBindFrameBuffer() during
* the process of someone setting up renderbuffers, etc.
*/
/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
return;
}
if (fb->Name) {
;/* do something depthy/stencily TODO */
}
/* none */
if (fb->Name == 0) {
if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
radeon->front_cliprects = GL_TRUE;
} else {
rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
radeon->front_cliprects = GL_FALSE;
}
} else {
/* user FBO in theory */
struct radeon_renderbuffer *rrb;
rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
if (rrb) {
offset = rrb->draw_offset;
rrbColor = rrb;
}
}
if (rrbColor == NULL)
radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
else
radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
rrbDepth = radeon_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
if (rrbDepth && rrbDepth->bo) {
radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
} else {
radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
}
} else {
radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
rrbDepth = NULL;
}
if (fb->Attachment[BUFFER_STENCIL].Renderbuffer) {
rrbStencil = radeon_renderbuffer(fb->Attachment[BUFFER_STENCIL].Renderbuffer);
if (rrbStencil && rrbStencil->bo) {
radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
/* need to re-compute stencil hw state */
if (!rrbDepth)
rrbDepth = rrbStencil;
} else {
radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
}
} else {
radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
if (ctx->Driver.Enable != NULL)
ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
else
ctx->NewState |= _NEW_STENCIL;
}
/* Update culling direction which changes depending on the
* orientation of the buffer:
*/
if (ctx->Driver.FrontFace)
ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
else
ctx->NewState |= _NEW_POLYGON;
/*
* Update depth test state
*/
if (ctx->Driver.Enable) {
ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
(ctx->Depth.Test && fb->Visual.depthBits > 0));
ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
(ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
} else {
ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
}
_mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base.Base);
_mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base.Base);
radeon->state.color.draw_offset = offset;
ctx->NewState |= _NEW_VIEWPORT;
/* Set state we know depends on drawable parameters:
*/
radeonUpdateScissor(ctx);
radeon->NewGLState |= _NEW_SCISSOR;
if (ctx->Driver.DepthRange)
ctx->Driver.DepthRange(ctx);
/* Update culling direction which changes depending on the
* orientation of the buffer:
*/
if (ctx->Driver.FrontFace)
ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
else
ctx->NewState |= _NEW_POLYGON;
}
/**
* Called via glDrawBuffer.
*/
void radeonDrawBuffer(struct gl_context *ctx)
{
if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "%s\n", __func__);
if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer)) {
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
/* If we might be front-buffer rendering on this buffer for
* the first time, invalidate our DRI drawable so we'll ask
* for new buffers (including the fake front) before we start
* rendering again.
*/
radeon_update_renderbuffers(radeon->driContext,
radeon->driContext->driDrawablePriv,
GL_FALSE);
}
radeon_draw_buffer(ctx, ctx->DrawBuffer);
}
void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
{
if (_mesa_is_front_buffer_reading(ctx->ReadBuffer)) {
struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
radeon_update_renderbuffers(rmesa->driContext,
rmesa->driContext->driReadablePriv, GL_FALSE);
}
/* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
if (ctx->ReadBuffer == ctx->DrawBuffer) {
/* This will update FBO completeness status.
* A framebuffer will be incomplete if the GL_READ_BUFFER setting
* refers to a missing renderbuffer. Calling glReadBuffer can set
* that straight and can make the drawing buffer complete.
*/
radeon_draw_buffer(ctx, ctx->DrawBuffer);
}
}
void radeon_window_moved(radeonContextPtr radeon)
{
/* Cliprects have to be updated before doing anything else */
radeonSetCliprects(radeon);
}
void radeon_viewport(struct gl_context *ctx)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
__DRIcontext *driContext = radeon->driContext;
void (*old_viewport)(struct gl_context *ctx);
if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer)) {
ctx->Driver.Flush(ctx, 0);
}
radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
if (driContext->driDrawablePriv != driContext->driReadablePriv)
radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
}
old_viewport = ctx->Driver.Viewport;
ctx->Driver.Viewport = NULL;
radeon_window_moved(radeon);
radeon_draw_buffer(ctx, radeon->glCtx.DrawBuffer);
ctx->Driver.Viewport = old_viewport;
}
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
int i, j, reg, count;
int dwords;
uint32_t packet0;
if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
return;
dwords = state->check(&radeon->glCtx, state);
fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
if (dwords > state->cmd_size)
dwords = state->cmd_size;
for (i = 0; i < dwords;) {
packet0 = state->cmd[i];
reg = (packet0 & 0x1FFF) << 2;
count = ((packet0 & 0x3FFF0000) >> 16) + 1;
fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
state->name, i, reg, count);
++i;
for (j = 0; j < count && i < dwords; j++) {
fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
state->name, i, reg, state->cmd[i]);
reg += 4;
++i;
}
}
}
}
/**
* Count total size for next state emit.
**/
GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
{
struct radeon_state_atom *atom;
GLuint dwords = 0;
/* check if we are going to emit full state */
if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
if (!radeon->hw.is_dirty)
goto out;
foreach(atom, &radeon->hw.atomlist) {
if (atom->dirty) {
const GLuint atom_size = atom->check(&radeon->glCtx, atom);
dwords += atom_size;
if (RADEON_CMDBUF && atom_size) {
radeon_print_state_atom(radeon, atom);
}
}
}
} else {
foreach(atom, &radeon->hw.atomlist) {
const GLuint atom_size = atom->check(&radeon->glCtx, atom);
dwords += atom_size;
if (RADEON_CMDBUF && atom_size) {
radeon_print_state_atom(radeon, atom);
}
}
}
out:
radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
return dwords;
}
static inline void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
BATCH_LOCALS(radeon);
int dwords;
dwords = atom->check(&radeon->glCtx, atom);
if (dwords) {
radeon_print_state_atom(radeon, atom);
if (atom->emit) {
atom->emit(&radeon->glCtx, atom);
} else {
BEGIN_BATCH(dwords);
OUT_BATCH_TABLE(atom->cmd, dwords);
END_BATCH();
}
atom->dirty = GL_FALSE;
} else {
radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
}
}
static inline void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
{
struct radeon_state_atom *atom;
/* Emit actual atoms */
if (radeon->hw.all_dirty || emitAll) {
foreach(atom, &radeon->hw.atomlist)
radeon_emit_atom( radeon, atom );
} else {
foreach(atom, &radeon->hw.atomlist) {
if ( atom->dirty )
radeon_emit_atom( radeon, atom );
}
}
COMMIT_BATCH();
}
void radeonEmitState(radeonContextPtr radeon)
{
radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __func__);
if (radeon->vtbl.pre_emit_state)
radeon->vtbl.pre_emit_state(radeon);
/* this code used to return here but now it emits zbs */
if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
return;
if (!radeon->cmdbuf.cs->cdw) {
if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "Begin reemit state\n");
radeonEmitAtoms(radeon, GL_TRUE);
} else {
if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "Begin dirty state\n");
radeonEmitAtoms(radeon, GL_FALSE);
}
radeon->hw.is_dirty = GL_FALSE;
radeon->hw.all_dirty = GL_FALSE;
}
void radeonFlush(struct gl_context *ctx, unsigned gallium_flush_flags)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s %d\n", __func__, radeon->cmdbuf.cs->cdw);
/* If we have no commands in the buffer, no pending DMA flush, and no
 * DMA buffer allocated, there is no point in flushing anything at all.
 */
if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
goto flush_front;
if (radeon->dma.flush)
radeon->dma.flush( ctx );
if (radeon->cmdbuf.cs->cdw)
rcommonFlushCmdBuf(radeon, __func__);
flush_front:
if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && radeon->front_buffer_dirty) {
__DRIscreen *const screen = radeon->radeonScreen->driScreen;
if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
&& (screen->dri2.loader->flushFrontBuffer != NULL)) {
__DRIdrawable * drawable = radeon_get_drawable(radeon);
/* We set the dirty bit in radeon_prepare_render() if we're
* front buffer rendering once we get there.
*/
radeon->front_buffer_dirty = GL_FALSE;
screen->dri2.loader->flushFrontBuffer(drawable, drawable->loaderPrivate);
}
}
}
/* Make sure all commands have been sent to the hardware and have
* completed processing.
*/
void radeonFinish(struct gl_context * ctx)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
struct gl_framebuffer *fb = ctx->DrawBuffer;
struct radeon_renderbuffer *rrb;
int i;
if (ctx->Driver.Flush)
ctx->Driver.Flush(ctx, 0); /* +r6/r7 */
for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
struct radeon_renderbuffer *rrb;
rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
if (rrb && rrb->bo)
radeon_bo_wait(rrb->bo);
}
rrb = radeon_get_depthbuffer(radeon);
if (rrb && rrb->bo)
radeon_bo_wait(rrb->bo);
}
/* cmdbuffer */
/**
* Send the current command buffer via ioctl to the hardware.
*/
int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
{
int ret = 0;
if (rmesa->cmdbuf.flushing) {
fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
exit(-1);
}
rmesa->cmdbuf.flushing = 1;
if (RADEON_DEBUG & RADEON_IOCTL) {
fprintf(stderr, "%s from %s\n", __func__, caller);
}
radeonEmitQueryEnd(&rmesa->glCtx);
if (rmesa->cmdbuf.cs->cdw) {
ret = radeon_cs_emit(rmesa->cmdbuf.cs);
rmesa->hw.all_dirty = GL_TRUE;
}
radeon_cs_erase(rmesa->cmdbuf.cs);
rmesa->cmdbuf.flushing = 0;
if (!rmesa->vtbl.revalidate_all_buffers(&rmesa->glCtx))
fprintf(stderr,"failed to revalidate buffers\n");
return ret;
}
int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
{
int ret;
radeonReleaseDmaRegions(rmesa);
ret = rcommonFlushCmdBufLocked(rmesa, caller);
if (ret) {
fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
"parse or rejected command stream. See dmesg "
"for more info.\n", ret);
exit(ret);
}
return ret;
}
/**
* Make sure that enough space is available in the command buffer
* by flushing if necessary.
*
* \param dwords The number of dwords we need to be free on the command buffer
*/
GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
{
if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
|| radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
/* If we would have to flush an empty buffer, the pending rendering operation is too big for the buffer. */
assert(rmesa->cmdbuf.cs->cdw);
rcommonFlushCmdBuf(rmesa, caller);
return GL_TRUE;
}
return GL_FALSE;
}
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
GLuint size;
struct drm_radeon_gem_info mminfo = { 0 };
int fd = rmesa->radeonScreen->driScreen->fd;
/* Initialize command buffer */
size = 256 * driQueryOptioni(&rmesa->optionCache,
"command_buffer_size");
if (size < 2 * rmesa->hw.max_state_size) {
size = 2 * rmesa->hw.max_state_size + 65535;
}
if (size > 64 * 256)
size = 64 * 256;
radeon_print(RADEON_CS, RADEON_VERBOSE,
"sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
radeon_print(RADEON_CS, RADEON_VERBOSE,
"sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
radeon_print(RADEON_CS, RADEON_VERBOSE,
"Allocating %d bytes command buffer (max state is %d bytes)\n",
size * 4, rmesa->hw.max_state_size * 4);
rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
if (rmesa->cmdbuf.csm == NULL) {
/* FIXME: fatal error */
return;
}
rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
assert(rmesa->cmdbuf.cs != NULL);
rmesa->cmdbuf.size = size;
radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
(void (*)(void *))rmesa->glCtx.Driver.Flush, &rmesa->glCtx);
if (!drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO,
&mminfo, sizeof(mminfo))) {
radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
mminfo.vram_visible);
radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
mminfo.gart_size);
}
}
/**
* Destroy the command buffer
*/
void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
{
radeon_cs_destroy(rmesa->cmdbuf.cs);
radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
}
void rcommonBeginBatch(radeonContextPtr rmesa, int n,
const char *file,
const char *function,
int line)
{
radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
n, rmesa->cmdbuf.cs->cdw, function, line);
}
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
_mesa_meta_Clear(ctx, mask);
}

@@ -1,87 +0,0 @@
#ifndef COMMON_MISC_H
#define COMMON_MISC_H
#include "radeon_common_context.h"
#include "radeon_dma.h"
#include "radeon_texture.h"
void radeonUserClear(struct gl_context *ctx, GLuint mask);
void radeonSetCliprects(radeonContextPtr radeon);
void radeonUpdateScissor( struct gl_context *ctx );
void radeonScissor(struct gl_context *ctx);
extern uint32_t radeonGetAge(radeonContextPtr radeon);
void radeonFlush(struct gl_context *ctx, unsigned gallium_flush_flags);
void radeonFinish(struct gl_context * ctx);
void radeonEmitState(radeonContextPtr radeon);
GLuint radeonCountStateEmitSize(radeonContextPtr radeon);
void radeon_clear_tris(struct gl_context *ctx, GLbitfield mask);
void radeon_window_moved(radeonContextPtr radeon);
void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb);
void radeonDrawBuffer(struct gl_context *ctx);
void radeonReadBuffer( struct gl_context *ctx, GLenum mode );
void radeon_viewport(struct gl_context *ctx);
void radeon_fbo_init(struct radeon_context *radeon);
void
radeon_renderbuffer_set_bo(struct radeon_renderbuffer *rb,
struct radeon_bo *bo);
struct radeon_renderbuffer *
radeon_create_renderbuffer(mesa_format format, __DRIdrawable *driDrawPriv);
void
radeonReadPixels(struct gl_context * ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack, GLvoid * pixels);
static inline struct radeon_renderbuffer *radeon_renderbuffer(struct gl_renderbuffer *rb)
{
struct radeon_renderbuffer *rrb = (struct radeon_renderbuffer *)rb;
radeon_print(RADEON_MEMORY, RADEON_TRACE,
"%s(rb %p)\n",
__func__, (void *) rb);
if (rrb && rrb->base.Base.ClassID == RADEON_RB_CLASS)
return rrb;
else
return NULL;
}
static inline struct radeon_renderbuffer *radeon_get_renderbuffer(struct gl_framebuffer *fb, int att_index)
{
radeon_print(RADEON_MEMORY, RADEON_TRACE,
"%s(fb %p, index %d)\n",
__func__, (void *) fb, att_index);
if (att_index >= 0)
return radeon_renderbuffer(fb->Attachment[att_index].Renderbuffer);
else
return NULL;
}
static inline struct radeon_renderbuffer *radeon_get_depthbuffer(radeonContextPtr rmesa)
{
struct radeon_renderbuffer *rrb;
rrb = radeon_renderbuffer(rmesa->state.depth.rb);
if (!rrb)
return NULL;
return rrb;
}
static inline struct radeon_renderbuffer *radeon_get_colorbuffer(radeonContextPtr rmesa)
{
struct radeon_renderbuffer *rrb;
rrb = radeon_renderbuffer(rmesa->state.color.rb);
if (!rrb)
return NULL;
return rrb;
}
#include "radeon_cmdbuf.h"
#endif

@@ -1,653 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#include "radeon_common.h"
#include "util/driconf.h" /* for symbolic values of enum-type options */
#include "utils.h"
#include "drivers/common/meta.h"
#include "main/context.h"
#include "main/framebuffer.h"
#include "main/fbobject.h"
#include "main/renderbuffer.h"
#include "main/state.h"
#include "util/simple_list.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "util/u_memory.h"
#ifndef RADEON_DEBUG
int RADEON_DEBUG = (0);
#endif
static const char* get_chip_family_name(int chip_family)
{
switch(chip_family) {
#if defined(RADEON_R100)
case CHIP_FAMILY_R100: return "R100";
case CHIP_FAMILY_RV100: return "RV100";
case CHIP_FAMILY_RS100: return "RS100";
case CHIP_FAMILY_RV200: return "RV200";
case CHIP_FAMILY_RS200: return "RS200";
#elif defined(RADEON_R200)
case CHIP_FAMILY_R200: return "R200";
case CHIP_FAMILY_RV250: return "RV250";
case CHIP_FAMILY_RS300: return "RS300";
case CHIP_FAMILY_RV280: return "RV280";
#endif
default: return "unknown";
}
}
const char *const radeonVendorString = "Mesa Project";
/* Return complete renderer string.
*/
const char *radeonGetRendererString(radeonScreenPtr radeonScreen)
{
static char buffer[128];
char hardwarename[32];
GLuint agp_mode = (radeonScreen->card_type==RADEON_CARD_PCI) ? 0 :
radeonScreen->AGPMode;
snprintf(hardwarename, sizeof(hardwarename), "%s (%s %04X)",
#if defined(RADEON_R100)
"R100",
#elif defined(RADEON_R200)
"R200",
#endif
get_chip_family_name(radeonScreen->chip_family),
radeonScreen->device_id);
driGetRendererString(buffer, hardwarename, agp_mode);
strcat(buffer, " DRI2");
return buffer;
}
/* Return various strings for glGetString().
*/
static const GLubyte *radeonGetString(struct gl_context * ctx, GLenum name)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
switch (name) {
case GL_VENDOR:
return (GLubyte *) radeonVendorString;
case GL_RENDERER:
return (GLubyte *) radeonGetRendererString(radeon->radeonScreen);
default:
return NULL;
}
}
/* Initialize the driver's misc functions.
*/
static void radeonInitDriverFuncs(struct dd_function_table *functions)
{
functions->GetString = radeonGetString;
}
/**
* Create and initialize all common fields of the context,
* including the Mesa context itself.
*/
GLboolean radeonInitContext(radeonContextPtr radeon,
gl_api api,
struct dd_function_table* functions,
const struct gl_config * glVisual,
__DRIcontext * driContextPriv,
void *sharedContextPrivate)
{
__DRIscreen *sPriv = driContextPriv->driScreenPriv;
radeonScreenPtr screen = (radeonScreenPtr) (sPriv->driverPrivate);
struct gl_context* ctx;
struct gl_context* shareCtx;
int fthrottle_mode;
/* Fill in additional standard functions. */
radeonInitDriverFuncs(functions);
radeon->radeonScreen = screen;
/* Allocate and initialize the Mesa context */
if (sharedContextPrivate)
shareCtx = &((radeonContextPtr)sharedContextPrivate)->glCtx;
else
shareCtx = NULL;
if (!_mesa_initialize_context(&radeon->glCtx, api,
glVisual, shareCtx,
functions))
return GL_FALSE;
ctx = &radeon->glCtx;
driContextPriv->driverPrivate = radeon;
_mesa_meta_init(ctx);
/* DRI fields */
radeon->driContext = driContextPriv;
/* Setup IRQs */
fthrottle_mode = driQueryOptioni(&radeon->optionCache, "fthrottle_mode");
radeon->iw.irq_seq = -1;
radeon->irqsEmitted = 0;
radeon->do_irqs = (fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS &&
radeon->radeonScreen->irq);
radeon->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);
if (!radeon->do_irqs)
fprintf(stderr,
"IRQ's not enabled, falling back to %s: %d %d\n",
radeon->do_usleeps ? "usleeps" : "busy waits",
fthrottle_mode, radeon->radeonScreen->irq);
radeon->texture_depth = driQueryOptioni (&radeon->optionCache,
"texture_depth");
if (radeon->texture_depth == DRI_CONF_TEXTURE_DEPTH_FB)
radeon->texture_depth = (glVisual == NULL || glVisual->rgbBits > 16) ?
DRI_CONF_TEXTURE_DEPTH_32 : DRI_CONF_TEXTURE_DEPTH_16;
radeon->texture_row_align = 32;
radeon->texture_rect_row_align = 64;
radeon->texture_compressed_row_align = 32;
radeon_init_dma(radeon);
/* _mesa_initialize_context calls _mesa_init_queryobj which
* initializes all of the counter sizes to 64. The counters on r100
* and r200 are only 32-bits for occlusion queries. Those are the
* only counters, so set the other sizes to zero.
*/
radeon->glCtx.Const.QueryCounterBits.SamplesPassed = 32;
radeon->glCtx.Const.QueryCounterBits.TimeElapsed = 0;
radeon->glCtx.Const.QueryCounterBits.Timestamp = 0;
radeon->glCtx.Const.QueryCounterBits.PrimitivesGenerated = 0;
radeon->glCtx.Const.QueryCounterBits.PrimitivesWritten = 0;
radeon->glCtx.Const.QueryCounterBits.VerticesSubmitted = 0;
radeon->glCtx.Const.QueryCounterBits.PrimitivesSubmitted = 0;
radeon->glCtx.Const.QueryCounterBits.VsInvocations = 0;
radeon->glCtx.Const.QueryCounterBits.TessPatches = 0;
radeon->glCtx.Const.QueryCounterBits.TessInvocations = 0;
radeon->glCtx.Const.QueryCounterBits.GsInvocations = 0;
radeon->glCtx.Const.QueryCounterBits.GsPrimitives = 0;
radeon->glCtx.Const.QueryCounterBits.FsInvocations = 0;
radeon->glCtx.Const.QueryCounterBits.ComputeInvocations = 0;
radeon->glCtx.Const.QueryCounterBits.ClInPrimitives = 0;
radeon->glCtx.Const.QueryCounterBits.ClOutPrimitives = 0;
return GL_TRUE;
}
/**
* Destroy the command buffer and state atoms.
*/
static void radeon_destroy_atom_list(radeonContextPtr radeon)
{
struct radeon_state_atom *atom;
foreach(atom, &radeon->hw.atomlist) {
free(atom->cmd);
free(atom->lastcmd);
}
}
/**
* Cleanup common context fields.
* Called by r200DestroyContext
*/
void radeonDestroyContext(__DRIcontext *driContextPriv )
{
#ifdef RADEON_BO_TRACK
FILE *track;
#endif
GET_CURRENT_CONTEXT(ctx);
radeonContextPtr radeon = (radeonContextPtr) driContextPriv->driverPrivate;
radeonContextPtr current = ctx ? RADEON_CONTEXT(ctx) : NULL;
assert(radeon);
_mesa_meta_free(&radeon->glCtx);
if (radeon == current) {
_mesa_make_current(NULL, NULL, NULL);
}
radeon_firevertices(radeon);
if (!is_empty_list(&radeon->dma.reserved)) {
rcommonFlushCmdBuf( radeon, __func__ );
}
radeonFreeDmaRegions(radeon);
radeonReleaseArrays(&radeon->glCtx, ~0);
if (radeon->vtbl.free_context)
radeon->vtbl.free_context(&radeon->glCtx);
_swsetup_DestroyContext( &radeon->glCtx );
_tnl_DestroyContext( &radeon->glCtx );
_vbo_DestroyContext( &radeon->glCtx );
_swrast_DestroyContext( &radeon->glCtx );
/* free atom list */
/* free the Mesa context data */
_mesa_free_context_data(&radeon->glCtx, true);
/* free the option cache */
driDestroyOptionCache(&radeon->optionCache);
rcommonDestroyCmdBuf(radeon);
radeon_destroy_atom_list(radeon);
#ifdef RADEON_BO_TRACK
track = fopen("/tmp/tracklog", "w");
if (track) {
radeon_tracker_print(&radeon->radeonScreen->bom->tracker, track);
fclose(track);
}
#endif
align_free(radeon);
}
/* Force the context `c' to be unbound from its buffer.
*/
GLboolean radeonUnbindContext(__DRIcontext * driContextPriv)
{
radeonContextPtr radeon = (radeonContextPtr) driContextPriv->driverPrivate;
if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "%s ctx %p\n", __func__,
&radeon->glCtx);
/* Unset current context and dispatch table */
_mesa_make_current(NULL, NULL, NULL);
return GL_TRUE;
}
static unsigned
radeon_bits_per_pixel(const struct radeon_renderbuffer *rb)
{
return _mesa_get_format_bytes(rb->base.Base.Format) * 8;
}
/*
* Check if drawable has been invalidated by dri2InvalidateDrawable().
* Update renderbuffers if so. This prevents a client from accessing
* a backbuffer that has a swap pending but not yet completed.
*
* See intel_prepare_render for equivalent code in intel driver.
*
*/
void radeon_prepare_render(radeonContextPtr radeon)
{
__DRIcontext *driContext = radeon->driContext;
__DRIdrawable *drawable;
__DRIscreen *screen;
screen = driContext->driScreenPriv;
if (!screen->dri2.loader)
return;
drawable = driContext->driDrawablePriv;
if (drawable->dri2.stamp != driContext->dri2.draw_stamp) {
if (drawable->lastStamp != drawable->dri2.stamp)
radeon_update_renderbuffers(driContext, drawable, GL_FALSE);
/* Intel driver does the equivalent of this, no clue if it is needed:*/
radeon_draw_buffer(&radeon->glCtx, radeon->glCtx.DrawBuffer);
driContext->dri2.draw_stamp = drawable->dri2.stamp;
}
drawable = driContext->driReadablePriv;
if (drawable->dri2.stamp != driContext->dri2.read_stamp) {
if (drawable->lastStamp != drawable->dri2.stamp)
radeon_update_renderbuffers(driContext, drawable, GL_FALSE);
driContext->dri2.read_stamp = drawable->dri2.stamp;
}
/* If we're currently rendering to the front buffer, the rendering
* that will happen next will probably dirty the front buffer. So
* mark it as dirty here.
*/
if (_mesa_is_front_buffer_drawing(radeon->glCtx.DrawBuffer))
radeon->front_buffer_dirty = GL_TRUE;
}
void
radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable,
GLboolean front_only)
{
unsigned int attachments[__DRI_BUFFER_COUNT];
__DRIbuffer *buffers = NULL;
__DRIscreen *screen;
struct radeon_renderbuffer *rb;
int i, count;
struct radeon_framebuffer *draw;
radeonContextPtr radeon;
char *regname;
struct radeon_bo *depth_bo = NULL, *bo;
if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
draw = drawable->driverPrivate;
screen = context->driScreenPriv;
radeon = (radeonContextPtr) context->driverPrivate;
/* Set this up front, so that in case our buffers get invalidated
* while we're getting new buffers, we don't clobber the stamp and
* thus ignore the invalidate. */
drawable->lastStamp = drawable->dri2.stamp;
if (screen->dri2.loader
&& (screen->dri2.loader->base.version > 2)
&& (screen->dri2.loader->getBuffersWithFormat != NULL)) {
struct radeon_renderbuffer *depth_rb;
struct radeon_renderbuffer *stencil_rb;
i = 0;
if ((front_only || _mesa_is_front_buffer_drawing(&draw->base) ||
_mesa_is_front_buffer_reading(&draw->base) ||
!draw->color_rb[1])
&& draw->color_rb[0]) {
attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
attachments[i++] = radeon_bits_per_pixel(draw->color_rb[0]);
}
if (!front_only) {
if (draw->color_rb[1]) {
attachments[i++] = __DRI_BUFFER_BACK_LEFT;
attachments[i++] = radeon_bits_per_pixel(draw->color_rb[1]);
}
depth_rb = radeon_get_renderbuffer(&draw->base, BUFFER_DEPTH);
stencil_rb = radeon_get_renderbuffer(&draw->base, BUFFER_STENCIL);
if ((depth_rb != NULL) && (stencil_rb != NULL)) {
attachments[i++] = __DRI_BUFFER_DEPTH_STENCIL;
attachments[i++] = radeon_bits_per_pixel(depth_rb);
} else if (depth_rb != NULL) {
attachments[i++] = __DRI_BUFFER_DEPTH;
attachments[i++] = radeon_bits_per_pixel(depth_rb);
} else if (stencil_rb != NULL) {
attachments[i++] = __DRI_BUFFER_STENCIL;
attachments[i++] = radeon_bits_per_pixel(stencil_rb);
}
}
buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
&drawable->w,
&drawable->h,
attachments, i / 2,
&count,
drawable->loaderPrivate);
} else if (screen->dri2.loader) {
i = 0;
if (draw->color_rb[0])
attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
if (!front_only) {
if (draw->color_rb[1])
attachments[i++] = __DRI_BUFFER_BACK_LEFT;
if (radeon_get_renderbuffer(&draw->base, BUFFER_DEPTH))
attachments[i++] = __DRI_BUFFER_DEPTH;
if (radeon_get_renderbuffer(&draw->base, BUFFER_STENCIL))
attachments[i++] = __DRI_BUFFER_STENCIL;
}
buffers = screen->dri2.loader->getBuffers(drawable,
&drawable->w,
&drawable->h,
attachments, i,
&count,
drawable->loaderPrivate);
}
if (buffers == NULL)
return;
for (i = 0; i < count; i++) {
switch (buffers[i].attachment) {
case __DRI_BUFFER_FRONT_LEFT:
rb = draw->color_rb[0];
regname = "dri2 front buffer";
break;
case __DRI_BUFFER_FAKE_FRONT_LEFT:
rb = draw->color_rb[0];
regname = "dri2 fake front buffer";
break;
case __DRI_BUFFER_BACK_LEFT:
rb = draw->color_rb[1];
regname = "dri2 back buffer";
break;
case __DRI_BUFFER_DEPTH:
rb = radeon_get_renderbuffer(&draw->base, BUFFER_DEPTH);
regname = "dri2 depth buffer";
break;
case __DRI_BUFFER_DEPTH_STENCIL:
rb = radeon_get_renderbuffer(&draw->base, BUFFER_DEPTH);
regname = "dri2 depth / stencil buffer";
break;
case __DRI_BUFFER_STENCIL:
rb = radeon_get_renderbuffer(&draw->base, BUFFER_STENCIL);
regname = "dri2 stencil buffer";
break;
case __DRI_BUFFER_ACCUM:
default:
fprintf(stderr,
"unhandled buffer attach event, attacment type %d\n",
buffers[i].attachment);
return;
}
if (rb == NULL)
continue;
if (rb->bo) {
uint32_t name = radeon_gem_name_bo(rb->bo);
if (name == buffers[i].name)
continue;
}
if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr,
"attaching buffer %s, %d, at %d, cpp %d, pitch %d\n",
regname, buffers[i].name, buffers[i].attachment,
buffers[i].cpp, buffers[i].pitch);
rb->cpp = buffers[i].cpp;
rb->pitch = buffers[i].pitch;
rb->base.Base.Width = drawable->w;
rb->base.Base.Height = drawable->h;
rb->has_surface = 0;
if (buffers[i].attachment == __DRI_BUFFER_STENCIL && depth_bo) {
if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "(reusing depth buffer as stencil)\n");
bo = depth_bo;
radeon_bo_ref(bo);
} else {
uint32_t tiling_flags = 0, pitch = 0;
int ret;
bo = radeon_bo_open(radeon->radeonScreen->bom,
buffers[i].name,
0,
0,
RADEON_GEM_DOMAIN_VRAM,
buffers[i].flags);
if (bo == NULL) {
fprintf(stderr, "failed to attach %s %d\n",
regname, buffers[i].name);
continue;
}
ret = radeon_bo_get_tiling(bo, &tiling_flags, &pitch);
if (ret) {
fprintf(stderr,
"failed to get tiling for %s %d\n",
regname, buffers[i].name);
radeon_bo_unref(bo);
bo = NULL;
continue;
} else {
if (tiling_flags & RADEON_TILING_MACRO)
bo->flags |= RADEON_BO_FLAGS_MACRO_TILE;
if (tiling_flags & RADEON_TILING_MICRO)
bo->flags |= RADEON_BO_FLAGS_MICRO_TILE;
}
}
if (buffers[i].attachment == __DRI_BUFFER_DEPTH) {
if (draw->base.Visual.depthBits == 16)
rb->cpp = 2;
depth_bo = bo;
}
radeon_renderbuffer_set_bo(rb, bo);
radeon_bo_unref(bo);
if (buffers[i].attachment == __DRI_BUFFER_DEPTH_STENCIL) {
rb = radeon_get_renderbuffer(&draw->base, BUFFER_STENCIL);
if (rb != NULL) {
struct radeon_bo *stencil_bo = NULL;
if (rb->bo) {
uint32_t name = radeon_gem_name_bo(rb->bo);
if (name == buffers[i].name)
continue;
}
stencil_bo = bo;
radeon_bo_ref(stencil_bo);
radeon_renderbuffer_set_bo(rb, stencil_bo);
radeon_bo_unref(stencil_bo);
}
}
}
driUpdateFramebufferSize(&radeon->glCtx, drawable);
}
/* Force the context `c' to be the current context and associate with it
* buffer `b'.
*/
GLboolean radeonMakeCurrent(__DRIcontext * driContextPriv,
__DRIdrawable * driDrawPriv,
__DRIdrawable * driReadPriv)
{
radeonContextPtr radeon;
GET_CURRENT_CONTEXT(curCtx);
struct gl_framebuffer *drfb, *readfb;
if (driContextPriv)
radeon = (radeonContextPtr)driContextPriv->driverPrivate;
else
radeon = NULL;
/* According to the glXMakeCurrent() man page: "Pending commands to
* the previous context, if any, are flushed before it is released."
* But only flush if we're actually changing contexts.
*/
if ((radeonContextPtr)curCtx && (radeonContextPtr)curCtx != radeon) {
_mesa_flush(curCtx);
}
if (!driContextPriv) {
if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "%s ctx is null\n", __func__);
_mesa_make_current(NULL, NULL, NULL);
return GL_TRUE;
}
if(driDrawPriv == NULL && driReadPriv == NULL) {
drfb = _mesa_get_incomplete_framebuffer();
readfb = drfb;
}
else {
drfb = driDrawPriv->driverPrivate;
readfb = driReadPriv->driverPrivate;
}
if(driDrawPriv)
radeon_update_renderbuffers(driContextPriv, driDrawPriv, GL_FALSE);
if (driDrawPriv != driReadPriv)
radeon_update_renderbuffers(driContextPriv, driReadPriv, GL_FALSE);
_mesa_reference_renderbuffer(&radeon->state.color.rb,
&(radeon_get_renderbuffer(drfb, BUFFER_BACK_LEFT)->base.Base));
_mesa_reference_renderbuffer(&radeon->state.depth.rb,
&(radeon_get_renderbuffer(drfb, BUFFER_DEPTH)->base.Base));
if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "%s ctx %p dfb %p rfb %p\n", __func__, &radeon->glCtx, drfb, readfb);
if(driDrawPriv)
driUpdateFramebufferSize(&radeon->glCtx, driDrawPriv);
if (driReadPriv != driDrawPriv)
driUpdateFramebufferSize(&radeon->glCtx, driReadPriv);
_mesa_make_current(&radeon->glCtx, drfb, readfb);
if (driDrawPriv == NULL && driReadPriv == NULL)
_mesa_reference_framebuffer(&drfb, NULL);
_mesa_update_state(&radeon->glCtx);
if (radeon->glCtx.DrawBuffer == drfb) {
if(driDrawPriv != NULL) {
radeon_window_moved(radeon);
}
radeon_draw_buffer(&radeon->glCtx, drfb);
}
if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "End %s\n", __func__);
return GL_TRUE;
}

View File

@ -1,503 +0,0 @@
#ifndef COMMON_CONTEXT_H
#define COMMON_CONTEXT_H
#include "math/m_vector.h"
#include "tnl/t_context.h"
#include "main/colormac.h"
#include "radeon_screen.h"
#include "radeon_debug.h"
#include "radeon_drm.h"
#include "dri_util.h"
#include "tnl/t_vertex.h"
#include "swrast/s_context.h"
struct radeon_context;
#include "radeon_bo_gem.h"
#include "radeon_cs_gem.h"
/* This union is used to avoid warnings/miscompilation
with float to uint32_t casts due to strict-aliasing */
typedef union { GLfloat f; uint32_t ui32; } float_ui32_type;
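/* Illustrative sketch, not part of the original header: storing through
 * the float member and reading back ui32 yields the raw bit pattern
 * without a strict-aliasing violation.
 */
static inline uint32_t radeon_float_bits_example(GLfloat f)
{
   float_ui32_type tmp;
   tmp.f = f;        /* write the float member... */
   return tmp.ui32;  /* ...read the same bytes back as a dword */
}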
struct radeon_context;
typedef struct radeon_context radeonContextRec;
typedef struct radeon_context *radeonContextPtr;
#define TEX_0 0x1
#define TEX_1 0x2
#define TEX_2 0x4
#define TEX_3 0x8
#define TEX_4 0x10
#define TEX_5 0x20
/* Rasterizing fallbacks */
/* See corresponding strings in r200_swtcl.c */
#define RADEON_FALLBACK_TEXTURE 0x0001
#define RADEON_FALLBACK_DRAW_BUFFER 0x0002
#define RADEON_FALLBACK_STENCIL 0x0004
#define RADEON_FALLBACK_RENDER_MODE 0x0008
#define RADEON_FALLBACK_BLEND_EQ 0x0010
#define RADEON_FALLBACK_BLEND_FUNC 0x0020
#define RADEON_FALLBACK_DISABLE 0x0040
#define RADEON_FALLBACK_BORDER_MODE 0x0080
#define RADEON_FALLBACK_DEPTH_BUFFER 0x0100
#define RADEON_FALLBACK_STENCIL_BUFFER 0x0200
#define R200_FALLBACK_TEXTURE 0x01
#define R200_FALLBACK_DRAW_BUFFER 0x02
#define R200_FALLBACK_STENCIL 0x04
#define R200_FALLBACK_RENDER_MODE 0x08
#define R200_FALLBACK_DISABLE 0x10
#define R200_FALLBACK_BORDER_MODE 0x20
#define RADEON_TCL_FALLBACK_RASTER 0x1 /* rasterization */
#define RADEON_TCL_FALLBACK_UNFILLED 0x2 /* unfilled tris */
#define RADEON_TCL_FALLBACK_LIGHT_TWOSIDE 0x4 /* twoside tris */
#define RADEON_TCL_FALLBACK_MATERIAL 0x8 /* material in vb */
#define RADEON_TCL_FALLBACK_TEXGEN_0 0x10 /* texgen, unit 0 */
#define RADEON_TCL_FALLBACK_TEXGEN_1 0x20 /* texgen, unit 1 */
#define RADEON_TCL_FALLBACK_TEXGEN_2 0x40 /* texgen, unit 2 */
#define RADEON_TCL_FALLBACK_TCL_DISABLE 0x80 /* user disable */
#define RADEON_TCL_FALLBACK_FOGCOORDSPEC 0x100 /* fogcoord, sep. spec light */
/* The blit width for texture uploads
*/
#define BLIT_WIDTH_BYTES 1024
/* Use the templated vertex format:
*/
#define COLOR_IS_RGBA
#define TAG(x) radeon##x
#include "tnl_dd/t_dd_vertex.h"
#undef TAG
#define RADEON_RB_CLASS 0xdeadbeef
struct radeon_renderbuffer
{
struct swrast_renderbuffer base;
struct radeon_bo *bo;
unsigned int cpp;
/* unsigned int offset; */
unsigned int pitch;
struct radeon_bo *map_bo;
GLbitfield map_mode;
int map_x, map_y, map_w, map_h;
int map_pitch;
void *map_buffer;
uint32_t draw_offset; /* FBO */
/* boo Xorg 6.8.2 compat */
int has_surface;
GLuint pf_pending; /**< sequence number of pending flip */
__DRIdrawable *dPriv;
};
struct radeon_framebuffer
{
struct gl_framebuffer base;
struct radeon_renderbuffer *color_rb[2];
};
struct radeon_colorbuffer_state {
int roundEnable;
struct gl_renderbuffer *rb;
uint32_t draw_offset; /* offset into color renderbuffer - FBOs */
};
struct radeon_depthbuffer_state {
struct gl_renderbuffer *rb;
};
struct radeon_scissor_state {
drm_clip_rect_t rect;
GLboolean enabled;
};
struct radeon_state_atom {
struct radeon_state_atom *next, *prev;
const char *name; /* for debug */
int cmd_size; /* size in bytes */
GLuint idx;
GLuint is_tcl;
GLuint *cmd; /* one or more cmd's */
GLuint *lastcmd; /* one or more cmd's */
GLboolean dirty; /* dirty-mark in emit_state_list */
int (*check) (struct gl_context *, struct radeon_state_atom *atom); /* is this state active? */
void (*emit) (struct gl_context *, struct radeon_state_atom *atom);
};
struct radeon_hw_state {
/* Head of the linked list of state atoms. */
struct radeon_state_atom atomlist;
int max_state_size; /* Number of bytes necessary for a full state emit. */
int max_post_flush_size; /* Number of bytes necessary for post flushing emits */
GLboolean is_dirty, all_dirty;
};
/* Texture related */
typedef struct _radeon_texture_image radeon_texture_image;
/**
* This is a subclass of swrast_texture_image since we use swrast
* for software fallback rendering.
*/
struct _radeon_texture_image {
struct swrast_texture_image base;
/**
* If mt != 0, the image is stored in hardware format in the
* given mipmap tree. In this case, base.Data may point into the
* mapping of the buffer object that contains the mipmap tree.
*
* If mt == 0, the image is stored in normal memory pointed to
* by base.Data.
*/
struct _radeon_mipmap_tree *mt;
struct radeon_bo *bo;
GLboolean used_as_render_target;
};
static inline radeon_texture_image *get_radeon_texture_image(struct gl_texture_image *image)
{
return (radeon_texture_image*)image;
}
typedef struct radeon_tex_obj radeonTexObj, *radeonTexObjPtr;
#define RADEON_TXO_MICRO_TILE (1 << 3)
/* Texture object in locally shared texture space.
*/
struct radeon_tex_obj {
struct gl_texture_object base;
struct _radeon_mipmap_tree *mt;
/**
* This is true if we've verified that the mipmap tree above is complete
* and so on.
*/
GLboolean validated;
/* Minimum LOD to be used during rendering */
unsigned minLod;
/* Maximum LOD to be used during rendering */
unsigned maxLod;
GLuint override_offset;
GLboolean image_override; /* Image overridden by GLX_EXT_tfp */
GLuint tile_bits; /* hw texture tile bits used on this texture */
struct radeon_bo *bo;
GLuint pp_txfilter; /* hardware register values */
GLuint pp_txformat;
GLuint pp_txformat_x;
GLuint pp_txsize; /* npot only */
GLuint pp_txpitch; /* npot only */
GLuint pp_border_color;
GLuint pp_cubic_faces; /* cube face 1,2,3,4 log2 sizes */
GLboolean border_fallback;
};
static inline radeonTexObj* radeon_tex_obj(struct gl_texture_object *texObj)
{
return (radeonTexObj*)texObj;
}
/* occlusion query */
struct radeon_query_object {
struct gl_query_object Base;
struct radeon_bo *bo;
int curr_offset;
GLboolean emitted_begin;
/* Double linked list of not flushed query objects */
struct radeon_query_object *prev, *next;
};
/* Need refcounting on dma buffers:
*/
struct radeon_dma_buffer {
int refcount; /* the number of retained regions in buf */
drmBufPtr buf;
};
struct radeon_aos {
struct radeon_bo *bo; /**< Buffer object where vertex data is stored */
int offset; /**< Offset into buffer object, in bytes */
int components; /**< Number of components per vertex */
int stride; /**< Stride in dwords (may be 0 for repeating) */
int count; /**< Number of vertices */
};
#define DMA_BO_FREE_TIME 100
struct radeon_dma_bo {
struct radeon_dma_bo *next, *prev;
struct radeon_bo *bo;
int expire_counter;
};
struct radeon_dma {
/* Active dma region. Allocations for vertices and retained
* regions come from here. Also used for emitting random vertices,
* these may be flushed by calling flush_current();
*/
struct radeon_dma_bo free;
struct radeon_dma_bo wait;
struct radeon_dma_bo reserved;
size_t current_used; /**< Number of bytes allocated and forgotten about */
size_t current_vertexptr; /**< End of active vertex region */
size_t minimum_size;
/**
* If current_vertexptr != current_used then flush must be non-zero.
* flush must be called before non-active vertex allocations can be
* performed.
*/
void (*flush) (struct gl_context *);
};
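/* Hedged sketch, not in the original file: how callers are expected to
 * honor the flush invariant documented above before allocating outside
 * the active vertex region.
 */
static inline void radeon_dma_flush_example(struct gl_context *ctx,
                                            struct radeon_dma *dma)
{
   /* current_vertexptr != current_used implies an active vertex region,
    * so flush must be non-NULL and must be called first. */
   if (dma->current_vertexptr != dma->current_used && dma->flush)
      dma->flush(ctx);
}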
/* radeon_swtcl.c
*/
struct radeon_swtcl_info {
GLuint RenderIndex;
GLuint vertex_size;
GLubyte *verts;
/* Fallback rasterization functions
*/
GLuint hw_primitive;
GLenum render_primitive;
GLuint numverts;
struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
GLuint vertex_attr_count;
GLuint emit_prediction;
struct radeon_bo *bo;
};
#define RADEON_MAX_AOS_ARRAYS 16
struct radeon_tcl_info {
struct radeon_aos aos[RADEON_MAX_AOS_ARRAYS];
GLuint aos_count;
struct radeon_bo *elt_dma_bo; /**< Buffer object that contains element indices */
int elt_dma_offset; /**< Offset into this buffer object, in bytes */
};
struct radeon_ioctl {
GLuint vertex_offset;
GLuint vertex_max;
struct radeon_bo *bo;
GLuint vertex_size;
};
#define RADEON_MAX_PRIMS 64
struct radeon_prim {
GLuint start;
GLuint end;
GLuint prim;
};
static inline GLuint radeonPackColor(GLuint cpp,
GLubyte r, GLubyte g,
GLubyte b, GLubyte a)
{
switch (cpp) {
case 2:
return PACK_COLOR_565(r, g, b);
case 4:
return PACK_COLOR_8888(a, r, g, b);
default:
return 0;
}
}
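/* Illustrative usage, not part of the original header: packing the same
 * color for a 16bpp (RGB565) and a 32bpp (ARGB8888) color buffer.
 */
static inline void radeonPackColor_example(void)
{
   GLuint c565  = radeonPackColor(2, 255, 128, 0, 255); /* RGB565 word   */
   GLuint c8888 = radeonPackColor(4, 255, 128, 0, 255); /* ARGB8888 word */
   (void) c565;
   (void) c8888;
}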
#define MAX_CMD_BUF_SZ (16*1024)
#define MAX_DMA_BUF_SZ (64*1024)
struct radeon_store {
GLuint statenr;
GLuint primnr;
char cmd_buf[MAX_CMD_BUF_SZ];
int cmd_used;
int elts_start;
};
typedef void (*radeon_tri_func) (radeonContextPtr,
radeonVertex *,
radeonVertex *, radeonVertex *);
typedef void (*radeon_line_func) (radeonContextPtr,
radeonVertex *, radeonVertex *);
typedef void (*radeon_point_func) (radeonContextPtr, radeonVertex *);
#define RADEON_MAX_BOS 32
struct radeon_state {
struct radeon_colorbuffer_state color;
struct radeon_depthbuffer_state depth;
struct radeon_scissor_state scissor;
};
/**
* This structure holds the command buffer while it is being constructed.
*
* The first batch of commands in the buffer is always the state that needs
* to be re-emitted when the context is lost. This batch can be skipped
* otherwise.
*/
struct radeon_cmdbuf {
struct radeon_cs_manager *csm;
struct radeon_cs *cs;
int size; /**< # of dwords total */
unsigned int flushing:1; /**< whether we're currently in FlushCmdBufLocked */
};
struct radeon_context {
struct gl_context glCtx; /**< base class, must be first */
__DRIcontext *driContext; /* DRI context */
radeonScreenPtr radeonScreen; /* Screen private DRI data */
/* Texture object bookkeeping
*/
int texture_depth;
float initialMaxAnisotropy;
uint32_t texture_row_align;
uint32_t texture_rect_row_align;
uint32_t texture_compressed_row_align;
struct radeon_dma dma;
struct radeon_hw_state hw;
/* Rasterization and vertex state:
*/
GLuint TclFallback;
GLuint Fallback;
GLuint NewGLState;
GLbitfield64 tnl_index_bitset; /* index of bits for last tnl_install_attrs */
/* Drawable information */
unsigned int lastStamp;
/* Busy waiting */
GLuint do_usleeps;
GLuint do_irqs;
GLuint irqsEmitted;
drm_radeon_irq_wait_t iw;
/* Derived state - for r300 only */
struct radeon_state state;
struct radeon_swtcl_info swtcl;
struct radeon_tcl_info tcl;
/* Configuration cache
*/
driOptionCache optionCache;
struct radeon_cmdbuf cmdbuf;
struct radeon_debug debug;
drm_clip_rect_t fboRect;
GLboolean front_cliprects;
/**
* Set if rendering has occurred to the drawable's front buffer.
*
* This is used in the DRI2 case to detect that glFlush should also copy
* the contents of the fake front buffer to the real front buffer.
*/
GLboolean front_buffer_dirty;
struct {
struct radeon_query_object *current;
struct radeon_state_atom queryobj;
} query;
struct {
void (*swtcl_flush)(struct gl_context *ctx, uint32_t offset);
void (*pre_emit_state)(radeonContextPtr rmesa);
void (*fallback)(struct gl_context *ctx, GLuint bit, GLboolean mode);
void (*free_context)(struct gl_context *ctx);
void (*emit_query_finish)(radeonContextPtr radeon);
void (*update_scissor)(struct gl_context *ctx);
unsigned (*check_blit)(mesa_format mesa_format, uint32_t dst_pitch);
unsigned (*blit)(struct gl_context *ctx,
struct radeon_bo *src_bo,
intptr_t src_offset,
mesa_format src_mesaformat,
unsigned src_pitch,
unsigned src_width,
unsigned src_height,
unsigned src_x_offset,
unsigned src_y_offset,
struct radeon_bo *dst_bo,
intptr_t dst_offset,
mesa_format dst_mesaformat,
unsigned dst_pitch,
unsigned dst_width,
unsigned dst_height,
unsigned dst_x_offset,
unsigned dst_y_offset,
unsigned reg_width,
unsigned reg_height,
unsigned flip_y);
unsigned (*is_format_renderable)(mesa_format mesa_format);
GLboolean (*revalidate_all_buffers)(struct gl_context *ctx);
} vtbl;
};
static inline radeonContextPtr RADEON_CONTEXT(struct gl_context *ctx)
{
return (radeonContextPtr) ctx;
}
static inline __DRIdrawable* radeon_get_drawable(radeonContextPtr radeon)
{
return radeon->driContext->driDrawablePriv;
}
static inline __DRIdrawable* radeon_get_readable(radeonContextPtr radeon)
{
return radeon->driContext->driReadablePriv;
}
extern const char *const radeonVendorString;
const char *radeonGetRendererString(radeonScreenPtr radeonScreen);
GLboolean radeonInitContext(radeonContextPtr radeon,
gl_api api,
struct dd_function_table* functions,
const struct gl_config * glVisual,
__DRIcontext * driContextPriv,
void *sharedContextPrivate);
void radeonCleanupContext(radeonContextPtr radeon);
GLboolean radeonUnbindContext(__DRIcontext * driContextPriv);
void radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable,
GLboolean front_only);
GLboolean radeonMakeCurrent(__DRIcontext * driContextPriv,
__DRIdrawable * driDrawPriv,
__DRIdrawable * driReadPriv);
extern void radeonDestroyContext(__DRIcontext * driContextPriv);
void radeon_prepare_render(radeonContextPtr radeon);
#endif

View File

@ -1,359 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* Keith Whitwell <keithw@vmware.com>
*/
#include <stdbool.h>
#include "main/glheader.h"
#include "main/api_arrayelt.h"
#include "main/api_exec.h"
#include "main/context.h"
#include "util/simple_list.h"
#include "main/extensions.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "vbo/vbo.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "drivers/common/driverfuncs.h"
#include "radeon_common.h"
#include "radeon_context.h"
#include "radeon_ioctl.h"
#include "radeon_state.h"
#include "radeon_span.h"
#include "radeon_tex.h"
#include "radeon_swtcl.h"
#include "radeon_tcl.h"
#include "radeon_queryobj.h"
#include "radeon_blit.h"
#include "radeon_fog.h"
#include "utils.h"
#include "util/driconf.h" /* for symbolic values of enum-type options */
#include "util/u_memory.h"
extern const struct tnl_pipeline_stage _radeon_render_stage;
extern const struct tnl_pipeline_stage _radeon_tcl_stage;
static const struct tnl_pipeline_stage *radeon_pipeline[] = {
/* Try and go straight to t&l
*/
&_radeon_tcl_stage,
/* Catch any t&l fallbacks
*/
&_tnl_vertex_transform_stage,
&_tnl_normal_transform_stage,
&_tnl_lighting_stage,
&_tnl_fog_coordinate_stage,
&_tnl_texgen_stage,
&_tnl_texture_transform_stage,
&_radeon_render_stage,
&_tnl_render_stage, /* FALLBACK: */
NULL,
};
static void r100_vtbl_pre_emit_state(radeonContextPtr radeon)
{
r100ContextPtr rmesa = (r100ContextPtr)radeon;
/* r100 always needs to emit ZBS to avoid TCL lockups */
rmesa->hw.zbs.dirty = 1;
radeon->hw.is_dirty = 1;
}
static void r100_vtbl_free_context(struct gl_context *ctx)
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
_mesa_vector4f_free( &rmesa->tcl.ObjClean );
}
static void r100_emit_query_finish(radeonContextPtr radeon)
{
BATCH_LOCALS(radeon);
struct radeon_query_object *query = radeon->query.current;
BEGIN_BATCH(4);
OUT_BATCH(CP_PACKET0(RADEON_RB3D_ZPASS_ADDR, 0));
OUT_BATCH_RELOC(query->bo, query->curr_offset, 0, RADEON_GEM_DOMAIN_GTT, 0);
END_BATCH();
query->curr_offset += sizeof(uint32_t);
assert(query->curr_offset < RADEON_QUERY_PAGE_SIZE);
query->emitted_begin = GL_FALSE;
}
static void r100_init_vtbl(radeonContextPtr radeon)
{
radeon->vtbl.swtcl_flush = r100_swtcl_flush;
radeon->vtbl.pre_emit_state = r100_vtbl_pre_emit_state;
radeon->vtbl.fallback = radeonFallback;
radeon->vtbl.free_context = r100_vtbl_free_context;
radeon->vtbl.emit_query_finish = r100_emit_query_finish;
radeon->vtbl.check_blit = r100_check_blit;
radeon->vtbl.blit = r100_blit;
radeon->vtbl.is_format_renderable = radeonIsFormatRenderable;
radeon->vtbl.revalidate_all_buffers = r100ValidateBuffers;
}
/* Create the device specific context.
*/
GLboolean
r100CreateContext( gl_api api,
const struct gl_config *glVisual,
__DRIcontext *driContextPriv,
const struct __DriverContextConfig *ctx_config,
unsigned *error,
void *sharedContextPrivate)
{
__DRIscreen *sPriv = driContextPriv->driScreenPriv;
radeonScreenPtr screen = (radeonScreenPtr)(sPriv->driverPrivate);
struct dd_function_table functions;
r100ContextPtr rmesa;
struct gl_context *ctx;
int i;
int tcl_mode, fthrottle_mode;
if (ctx_config->flags & ~(__DRI_CTX_FLAG_DEBUG | __DRI_CTX_FLAG_NO_ERROR)) {
*error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
return false;
}
if (ctx_config->attribute_mask) {
*error = __DRI_CTX_ERROR_UNKNOWN_ATTRIBUTE;
return false;
}
assert(driContextPriv);
assert(screen);
/* Allocate the Radeon context */
rmesa = align_calloc(sizeof(*rmesa), 16);
if ( !rmesa ) {
*error = __DRI_CTX_ERROR_NO_MEMORY;
return GL_FALSE;
}
rmesa->radeon.radeonScreen = screen;
r100_init_vtbl(&rmesa->radeon);
/* init exp fog table data */
radeonInitStaticFogData();
/* Parse configuration files.
* Do this here so that initialMaxAnisotropy is set before we create
* the default textures.
*/
driParseConfigFiles (&rmesa->radeon.optionCache, &screen->optionCache,
screen->driScreen->myNum, "radeon", NULL, NULL, NULL, 0, NULL, 0);
rmesa->radeon.initialMaxAnisotropy = driQueryOptionf(&rmesa->radeon.optionCache,
"def_max_anisotropy");
if (driQueryOptionb(&rmesa->radeon.optionCache, "hyperz"))
rmesa->using_hyperz = GL_TRUE;
/* Init default driver functions then plug in our Radeon-specific functions
* (the texture functions are especially important)
*/
_mesa_init_driver_functions( &functions );
_tnl_init_driver_draw_function( &functions );
radeonInitTextureFuncs( &rmesa->radeon, &functions );
radeonInitQueryObjFunctions(&functions);
if (!radeonInitContext(&rmesa->radeon, api, &functions,
glVisual, driContextPriv,
sharedContextPrivate)) {
align_free(rmesa);
*error = __DRI_CTX_ERROR_NO_MEMORY;
return GL_FALSE;
}
rmesa->radeon.swtcl.RenderIndex = ~0;
rmesa->radeon.hw.all_dirty = GL_TRUE;
ctx = &rmesa->radeon.glCtx;
driContextSetFlags(ctx, ctx_config->flags);
/* Initialize the software rasterizer and helper modules.
*/
_swrast_CreateContext( ctx );
_vbo_CreateContext( ctx, false );
_tnl_CreateContext( ctx );
_swsetup_CreateContext( ctx );
ctx->Const.MaxTextureUnits = driQueryOptioni (&rmesa->radeon.optionCache,
"texture_units");
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = ctx->Const.MaxTextureUnits;
ctx->Const.MaxTextureCoordUnits = ctx->Const.MaxTextureUnits;
ctx->Const.MaxCombinedTextureImageUnits = ctx->Const.MaxTextureUnits;
ctx->Const.StripTextureBorder = GL_TRUE;
/* FIXME: When no memory manager is available we should set this
* to some reasonable value based on texture memory pool size */
ctx->Const.MaxTextureSize = 2048;
ctx->Const.Max3DTextureLevels = 9;
ctx->Const.MaxCubeTextureLevels = 12;
ctx->Const.MaxTextureRectSize = 2048;
ctx->Const.MaxTextureMaxAnisotropy = 16.0;
/* No wide points.
*/
ctx->Const.MinPointSize = 1.0;
ctx->Const.MinPointSizeAA = 1.0;
ctx->Const.MaxPointSize = 1.0;
ctx->Const.MaxPointSizeAA = 1.0;
ctx->Const.MinLineWidth = 1.0;
ctx->Const.MinLineWidthAA = 1.0;
ctx->Const.MaxLineWidth = 10.0;
ctx->Const.MaxLineWidthAA = 10.0;
ctx->Const.LineWidthGranularity = 0.0625;
/* Set maxlocksize (and hence vb size) small enough to avoid
* fallbacks in radeon_tcl.c, i.e. guarantee that all vertices can
* fit in a single dma buffer for indexed rendering of quad strips,
* etc.
*/
ctx->Const.MaxArrayLockSize =
MIN2( ctx->Const.MaxArrayLockSize,
RADEON_BUFFER_SIZE / RADEON_MAX_TCL_VERTSIZE );
rmesa->boxes = 0;
ctx->Const.MaxDrawBuffers = 1;
ctx->Const.MaxColorAttachments = 1;
ctx->Const.MaxRenderbufferSize = 2048;
ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;
/* Install the customized pipeline:
*/
_tnl_destroy_pipeline( ctx );
_tnl_install_pipeline( ctx, radeon_pipeline );
/* Try and keep materials and vertices separate:
*/
/* _tnl_isolate_materials( ctx, GL_TRUE ); */
/* Configure swrast and T&L to match hardware characteristics:
*/
_swrast_allow_pixel_fog( ctx, GL_FALSE );
_swrast_allow_vertex_fog( ctx, GL_TRUE );
_tnl_allow_pixel_fog( ctx, GL_FALSE );
_tnl_allow_vertex_fog( ctx, GL_TRUE );
for ( i = 0 ; i < RADEON_MAX_TEXTURE_UNITS ; i++ ) {
_math_matrix_ctr( &rmesa->TexGenMatrix[i] );
_math_matrix_ctr( &rmesa->tmpmat[i] );
_math_matrix_set_identity( &rmesa->TexGenMatrix[i] );
_math_matrix_set_identity( &rmesa->tmpmat[i] );
}
ctx->Extensions.ARB_occlusion_query = true;
ctx->Extensions.ARB_texture_border_clamp = true;
ctx->Extensions.ARB_texture_cube_map = true;
ctx->Extensions.ARB_texture_env_combine = true;
ctx->Extensions.ARB_texture_env_crossbar = true;
ctx->Extensions.ARB_texture_env_dot3 = true;
ctx->Extensions.ARB_texture_filter_anisotropic = true;
ctx->Extensions.ARB_texture_mirror_clamp_to_edge = true;
ctx->Extensions.ATI_texture_env_combine3 = true;
ctx->Extensions.ATI_texture_mirror_once = true;
ctx->Extensions.EXT_texture_env_dot3 = true;
ctx->Extensions.EXT_texture_filter_anisotropic = true;
ctx->Extensions.EXT_texture_mirror_clamp = true;
ctx->Extensions.MESA_ycbcr_texture = true;
ctx->Extensions.NV_texture_rectangle = true;
ctx->Extensions.OES_EGL_image = true;
ctx->Extensions.EXT_texture_compression_s3tc = true;
ctx->Extensions.ANGLE_texture_compression_dxt = true;
/* XXX these should really go right after _mesa_init_driver_functions() */
radeon_fbo_init(&rmesa->radeon);
radeonInitSpanFuncs( ctx );
radeonInitIoctlFuncs( ctx );
radeonInitStateFuncs( ctx );
radeonInitState( rmesa );
radeonInitSwtcl( ctx );
_mesa_vector4f_alloc( &rmesa->tcl.ObjClean, 0,
ctx->Const.MaxArrayLockSize, 32 );
fthrottle_mode = driQueryOptioni(&rmesa->radeon.optionCache, "fthrottle_mode");
rmesa->radeon.iw.irq_seq = -1;
rmesa->radeon.irqsEmitted = 0;
rmesa->radeon.do_irqs = (rmesa->radeon.radeonScreen->irq != 0 &&
fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);
rmesa->radeon.do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);
tcl_mode = driQueryOptioni(&rmesa->radeon.optionCache, "tcl_mode");
if (getenv("RADEON_NO_RAST")) {
fprintf(stderr, "disabling 3D acceleration\n");
FALLBACK(rmesa, RADEON_FALLBACK_DISABLE, 1);
} else if (tcl_mode == DRI_CONF_TCL_SW ||
!(rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL)) {
if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
rmesa->radeon.radeonScreen->chip_flags &= ~RADEON_CHIPSET_TCL;
fprintf(stderr, "Disabling HW TCL support\n");
}
TCL_FALLBACK(&rmesa->radeon.glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
}
if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
/* _tnl_need_dlist_norm_lengths( ctx, GL_FALSE ); */
}
_mesa_override_extensions(ctx);
_mesa_compute_version(ctx);
/* Exec table initialization requires the version to be computed */
_mesa_initialize_dispatch_tables(ctx);
_mesa_initialize_vbo_vtxfmt(ctx);
*error = __DRI_CTX_ERROR_SUCCESS;
return GL_TRUE;
}

View File

@ -1,461 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Keith Whitwell <keithw@vmware.com>
* Kevin E. Martin <martin@valinux.com>
* Nicolai Haehnle <prefect_@gmx.net>
*/
#ifndef __RADEON_CONTEXT_H__
#define __RADEON_CONTEXT_H__
#include "tnl/t_vertex.h"
#include "dri_util.h"
#include "drm-uapi/drm.h"
#include "radeon_drm.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "radeon_screen.h"
#include "radeon_common.h"
struct r100_context;
typedef struct r100_context r100ContextRec;
typedef struct r100_context *r100ContextPtr;
#define R100_TEX_ALL 0x7
/* used for both tcl_vtx and vc_frmt tex bits (they are identical) */
#define RADEON_ST_BIT(unit) \
(unit == 0 ? RADEON_CP_VC_FRMT_ST0 : (RADEON_CP_VC_FRMT_ST1 >> 2) << (2 * unit))
#define RADEON_Q_BIT(unit) \
(unit == 0 ? RADEON_CP_VC_FRMT_Q0 : (RADEON_CP_VC_FRMT_Q1 >> 2) << (2 * unit))
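/* Worked example (annotation, not original code): for unit 0 the macros
 * select RADEON_CP_VC_FRMT_ST0 / _Q0 directly; for unit N > 0 the shared
 * ST1/Q1 bit is shifted into the per-unit position, e.g.
 *   RADEON_ST_BIT(2) == ((RADEON_CP_VC_FRMT_ST1 >> 2) << 4)
 */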
struct radeon_texture_env_state {
radeonTexObjPtr texobj;
GLenum format;
GLenum envMode;
};
struct radeon_texture_state {
struct radeon_texture_env_state unit[RADEON_MAX_TEXTURE_UNITS];
};
/* Trying to keep these relatively short as the variables are becoming
* extravagantly long. Drop the driver name prefix off the front of
* everything - I think we know which driver we're in by now, and keep the
* prefix to 3 letters unless absolutely impossible.
*/
#define CTX_CMD_0 0
#define CTX_PP_MISC 1
#define CTX_PP_FOG_COLOR 2
#define CTX_RE_SOLID_COLOR 3
#define CTX_RB3D_BLENDCNTL 4
#define CTX_RB3D_DEPTHOFFSET 5
#define CTX_RB3D_DEPTHPITCH 6
#define CTX_RB3D_ZSTENCILCNTL 7
#define CTX_CMD_1 8
#define CTX_PP_CNTL 9
#define CTX_RB3D_CNTL 10
#define CTX_RB3D_COLOROFFSET 11
#define CTX_CMD_2 12
#define CTX_RB3D_COLORPITCH 13
#define CTX_STATE_SIZE 14
#define SET_CMD_0 0
#define SET_SE_CNTL 1
#define SET_SE_COORDFMT 2
#define SET_CMD_1 3
#define SET_SE_CNTL_STATUS 4
#define SET_STATE_SIZE 5
#define LIN_CMD_0 0
#define LIN_RE_LINE_PATTERN 1
#define LIN_RE_LINE_STATE 2
#define LIN_CMD_1 3
#define LIN_SE_LINE_WIDTH 4
#define LIN_STATE_SIZE 5
#define MSK_CMD_0 0
#define MSK_RB3D_STENCILREFMASK 1
#define MSK_RB3D_ROPCNTL 2
#define MSK_RB3D_PLANEMASK 3
#define MSK_STATE_SIZE 4
#define VPT_CMD_0 0
#define VPT_SE_VPORT_XSCALE 1
#define VPT_SE_VPORT_XOFFSET 2
#define VPT_SE_VPORT_YSCALE 3
#define VPT_SE_VPORT_YOFFSET 4
#define VPT_SE_VPORT_ZSCALE 5
#define VPT_SE_VPORT_ZOFFSET 6
#define VPT_STATE_SIZE 7
#define MSC_CMD_0 0
#define MSC_RE_MISC 1
#define MSC_STATE_SIZE 2
#define TEX_CMD_0 0
#define TEX_PP_TXFILTER 1
#define TEX_PP_TXFORMAT 2
#define TEX_PP_TXOFFSET 3
#define TEX_PP_TXCBLEND 4
#define TEX_PP_TXABLEND 5
#define TEX_PP_TFACTOR 6
#define TEX_CMD_1 7
#define TEX_PP_BORDER_COLOR 8
#define TEX_STATE_SIZE 9
#define TXR_CMD_0 0 /* rectangle textures */
#define TXR_PP_TEX_SIZE 1 /* 0x1d04, 0x1d0c for NPOT! */
#define TXR_PP_TEX_PITCH 2 /* 0x1d08, 0x1d10 for NPOT! */
#define TXR_STATE_SIZE 3
#define CUBE_CMD_0 0
#define CUBE_PP_CUBIC_FACES 1
#define CUBE_CMD_1 2
#define CUBE_PP_CUBIC_OFFSET_0 3
#define CUBE_PP_CUBIC_OFFSET_1 4
#define CUBE_PP_CUBIC_OFFSET_2 5
#define CUBE_PP_CUBIC_OFFSET_3 6
#define CUBE_PP_CUBIC_OFFSET_4 7
#define CUBE_STATE_SIZE 8
#define ZBS_CMD_0 0
#define ZBS_SE_ZBIAS_FACTOR 1
#define ZBS_SE_ZBIAS_CONSTANT 2
#define ZBS_STATE_SIZE 3
#define TCL_CMD_0 0
#define TCL_OUTPUT_VTXFMT 1
#define TCL_OUTPUT_VTXSEL 2
#define TCL_MATRIX_SELECT_0 3
#define TCL_MATRIX_SELECT_1 4
#define TCL_UCP_VERT_BLEND_CTL 5
#define TCL_TEXTURE_PROC_CTL 6
#define TCL_LIGHT_MODEL_CTL 7
#define TCL_PER_LIGHT_CTL_0 8
#define TCL_PER_LIGHT_CTL_1 9
#define TCL_PER_LIGHT_CTL_2 10
#define TCL_PER_LIGHT_CTL_3 11
#define TCL_STATE_SIZE 12
#define MTL_CMD_0 0
#define MTL_EMMISSIVE_RED 1
#define MTL_EMMISSIVE_GREEN 2
#define MTL_EMMISSIVE_BLUE 3
#define MTL_EMMISSIVE_ALPHA 4
#define MTL_AMBIENT_RED 5
#define MTL_AMBIENT_GREEN 6
#define MTL_AMBIENT_BLUE 7
#define MTL_AMBIENT_ALPHA 8
#define MTL_DIFFUSE_RED 9
#define MTL_DIFFUSE_GREEN 10
#define MTL_DIFFUSE_BLUE 11
#define MTL_DIFFUSE_ALPHA 12
#define MTL_SPECULAR_RED 13
#define MTL_SPECULAR_GREEN 14
#define MTL_SPECULAR_BLUE 15
#define MTL_SPECULAR_ALPHA 16
#define MTL_SHININESS 17
#define MTL_STATE_SIZE 18
#define VTX_CMD_0 0
#define VTX_SE_COORD_FMT 1
#define VTX_STATE_SIZE 2
#define MAT_CMD_0 0
#define MAT_ELT_0 1
#define MAT_STATE_SIZE 17
#define GRD_CMD_0 0
#define GRD_VERT_GUARD_CLIP_ADJ 1
#define GRD_VERT_GUARD_DISCARD_ADJ 2
#define GRD_HORZ_GUARD_CLIP_ADJ 3
#define GRD_HORZ_GUARD_DISCARD_ADJ 4
#define GRD_STATE_SIZE 5
/* position changes frequently when lighting in modelpos - separate
* out to new state item?
*/
#define LIT_CMD_0 0
#define LIT_AMBIENT_RED 1
#define LIT_AMBIENT_GREEN 2
#define LIT_AMBIENT_BLUE 3
#define LIT_AMBIENT_ALPHA 4
#define LIT_DIFFUSE_RED 5
#define LIT_DIFFUSE_GREEN 6
#define LIT_DIFFUSE_BLUE 7
#define LIT_DIFFUSE_ALPHA 8
#define LIT_SPECULAR_RED 9
#define LIT_SPECULAR_GREEN 10
#define LIT_SPECULAR_BLUE 11
#define LIT_SPECULAR_ALPHA 12
#define LIT_POSITION_X 13
#define LIT_POSITION_Y 14
#define LIT_POSITION_Z 15
#define LIT_POSITION_W 16
#define LIT_DIRECTION_X 17
#define LIT_DIRECTION_Y 18
#define LIT_DIRECTION_Z 19
#define LIT_DIRECTION_W 20
#define LIT_ATTEN_QUADRATIC 21
#define LIT_ATTEN_LINEAR 22
#define LIT_ATTEN_CONST 23
#define LIT_ATTEN_XXX 24
#define LIT_CMD_1 25
#define LIT_SPOT_DCD 26
#define LIT_SPOT_EXPONENT 27
#define LIT_SPOT_CUTOFF 28
#define LIT_SPECULAR_THRESH 29
#define LIT_RANGE_CUTOFF 30 /* ? */
#define LIT_ATTEN_CONST_INV 31
#define LIT_STATE_SIZE 32
/* Fog
*/
#define FOG_CMD_0 0
#define FOG_R 1
#define FOG_C 2
#define FOG_D 3
#define FOG_PAD 4
#define FOG_STATE_SIZE 5
/* UCP
*/
#define UCP_CMD_0 0
#define UCP_X 1
#define UCP_Y 2
#define UCP_Z 3
#define UCP_W 4
#define UCP_STATE_SIZE 5
/* GLT - Global ambient
*/
#define GLT_CMD_0 0
#define GLT_RED 1
#define GLT_GREEN 2
#define GLT_BLUE 3
#define GLT_ALPHA 4
#define GLT_STATE_SIZE 5
/* EYE
*/
#define EYE_CMD_0 0
#define EYE_X 1
#define EYE_Y 2
#define EYE_Z 3
#define EYE_RESCALE_FACTOR 4
#define EYE_STATE_SIZE 5
#define SHN_CMD_0 0
#define SHN_SHININESS 1
#define SHN_STATE_SIZE 2
#define R100_QUERYOBJ_CMD_0 0
#define R100_QUERYOBJ_DATA_0 1
#define R100_QUERYOBJ_CMDSIZE 2
#define STP_CMD_0 0
#define STP_DATA_0 1
#define STP_CMD_1 2
#define STP_STATE_SIZE 35
struct r100_hw_state {
/* Hardware state, stored as cmdbuf commands:
* -- Need to double-buffer for
* - eliding no-op state-change loops? (except line stipple count)
*/
struct radeon_state_atom ctx;
struct radeon_state_atom set;
struct radeon_state_atom lin;
struct radeon_state_atom msk;
struct radeon_state_atom vpt;
struct radeon_state_atom tcl;
struct radeon_state_atom msc;
struct radeon_state_atom tex[3];
struct radeon_state_atom cube[3];
struct radeon_state_atom zbs;
struct radeon_state_atom mtl;
struct radeon_state_atom mat[6];
struct radeon_state_atom lit[8]; /* includes vec, scl commands */
struct radeon_state_atom ucp[6];
struct radeon_state_atom eye; /* eye pos */
struct radeon_state_atom grd; /* guard band clipping */
struct radeon_state_atom fog;
struct radeon_state_atom glt;
struct radeon_state_atom txr[3]; /* for NPOT */
struct radeon_state_atom stp;
};
struct radeon_stipple_state {
GLuint mask[32];
};
struct r100_state {
struct radeon_stipple_state stipple;
struct radeon_texture_state texture;
};
#define RADEON_CMD_BUF_SZ (8*1024)
#define R200_ELT_BUF_SZ (8*1024)
/* radeon_tcl.c
*/
struct r100_tcl_info {
GLuint vertex_format;
GLuint hw_primitive;
/* Temporary for cases where incoming vertex data is incompatible
* with maos code.
*/
GLvector4f ObjClean;
GLuint *Elts;
int elt_cmd_offset;
int elt_cmd_start;
int elt_used;
};
/* radeon_swtcl.c
*/
struct r100_swtcl_info {
GLuint vertex_format;
GLubyte *verts;
/* Fallback rasterization functions
*/
radeon_point_func draw_point;
radeon_line_func draw_line;
radeon_tri_func draw_tri;
/**
* Offset of the 4UB color data within a hardware (swtcl) vertex.
*/
GLuint coloroffset;
/**
* Offset of the 3UB specular color data within a hardware (swtcl) vertex.
*/
GLuint specoffset;
GLboolean needproj;
};
/* A maximum total of 20 elements per vertex: 3 floats for position, 3
* floats for normal, 4 floats for color, 4 bytes (one element) for
* secondary color, and 3 floats for each of three texture units (9
* floats total): 3 + 3 + 4 + 1 + 9 = 20.
*
* The position data is never actually stored here, so 3 elements could be
* trimmed out of the buffer. This number is only valid for vtxfmt!
*/
#define RADEON_MAX_VERTEX_SIZE 20
struct r100_context {
struct radeon_context radeon;
/* Driver and hardware state management
*/
struct r100_hw_state hw;
struct r100_state state;
/* Vertex buffers
*/
struct radeon_ioctl ioctl;
struct radeon_store store;
/* TCL stuff
*/
GLmatrix TexGenMatrix[RADEON_MAX_TEXTURE_UNITS];
GLboolean recheck_texgen[RADEON_MAX_TEXTURE_UNITS];
GLboolean TexGenNeedNormals[RADEON_MAX_TEXTURE_UNITS];
GLuint TexGenEnabled;
GLuint NeedTexMatrix;
GLuint TexMatColSwap;
GLmatrix tmpmat[RADEON_MAX_TEXTURE_UNITS];
/* radeon_tcl.c
*/
struct r100_tcl_info tcl;
/* radeon_swtcl.c
*/
struct r100_swtcl_info swtcl;
GLboolean using_hyperz;
/* Performance counters
*/
GLuint boxes; /* Draw performance boxes */
GLuint hardwareWentIdle;
GLuint c_clears;
GLuint c_drawWaits;
GLuint c_textureSwaps;
GLuint c_textureBytes;
GLuint c_vertexBuffers;
};
static inline r100ContextPtr
R100_CONTEXT(struct gl_context *ctx)
{
return (r100ContextPtr) ctx;
}
#define RADEON_OLD_PACKETS 1
extern GLboolean r100CreateContext( gl_api api,
const struct gl_config *glVisual,
__DRIcontext *driContextPriv,
const struct __DriverContextConfig *
ctx_config,
unsigned *error,
void *sharedContextPrivate);
#endif /* __RADEON_CONTEXT_H__ */

View File

@ -1,112 +0,0 @@
/*
* Copyright © 2009 Pauli Nieminen
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Pauli Nieminen <suokkos@gmail.com>
*/
#include "util/debug.h"
#include "radeon_common_context.h"
#include "radeon_debug.h"
#include <stdarg.h>
#include <stdio.h>
static const struct debug_control debug_control[] = {
{"fall", RADEON_FALLBACKS},
{"tex", RADEON_TEXTURE},
{"ioctl", RADEON_IOCTL},
{"verts", RADEON_VERTS},
{"render", RADEON_RENDER},
{"swrender", RADEON_SWRENDER},
{"state", RADEON_STATE},
{"shader", RADEON_SHADER},
{"vfmt", RADEON_VFMT},
{"vtxf", RADEON_VFMT},
{"dri", RADEON_DRI},
{"dma", RADEON_DMA},
{"sanity", RADEON_SANITY},
{"sync", RADEON_SYNC},
{"pixel", RADEON_PIXEL},
{"mem", RADEON_MEMORY},
{"cs", RADEON_CS},
{"allmsg", ~RADEON_SYNC}, /* avoid the term "sync" because the parser uses strstr */
{NULL, 0}
};
#if defined(RADEON_R200)
radeon_debug_type_t r200_enabled_debug_types;
#elif defined(RADEON_R100)
radeon_debug_type_t r100_enabled_debug_types;
#endif
void radeon_init_debug(void)
{
RADEON_DEBUG = parse_debug_string(getenv("RADEON_DEBUG"), debug_control);
RADEON_DEBUG |= RADEON_GENERAL;
}
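/* Usage note, not from the original source: debug channels are chosen at
 * run time through the RADEON_DEBUG environment variable, e.g.
 *
 *   RADEON_DEBUG=tex,state,dri glxgears
 *
 * parse_debug_string() ORs together the flags named in the table above,
 * and RADEON_GENERAL is always kept enabled for errors and warnings.
 */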
void _radeon_debug_add_indent(void)
{
GET_CURRENT_CONTEXT(ctx);
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
const size_t length = sizeof(radeon->debug.indent)
/ sizeof(radeon->debug.indent[0]);
if (radeon->debug.indent_depth < length - 1) {
radeon->debug.indent[radeon->debug.indent_depth] = '\t';
++radeon->debug.indent_depth;
}
}
void _radeon_debug_remove_indent(void)
{
GET_CURRENT_CONTEXT(ctx);
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
if (radeon->debug.indent_depth > 0) {
radeon->debug.indent[radeon->debug.indent_depth] = '\0';
--radeon->debug.indent_depth;
}
}
void _radeon_print(const radeon_debug_type_t type,
const radeon_debug_level_t level,
const char* message,
...)
{
va_list values;
GET_CURRENT_CONTEXT(ctx);
if (ctx) {
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
// FIXME: Make this multi thread safe
if (radeon->debug.indent_depth)
fprintf(stderr, "%s", radeon->debug.indent);
}
va_start( values, message );
vfprintf(stderr, message, values);
va_end( values );
}

View File

@ -1,173 +0,0 @@
/*
* Copyright © 2009 Pauli Nieminen
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Pauli Nieminen <suokkos@gmail.com>
*/
#ifndef RADEON_DEBUG_H_INCLUDED
#define RADEON_DEBUG_H_INCLUDED
#include <stdlib.h>
typedef enum radeon_debug_levels {
RADEON_CRITICAL = 0, /* Only errors */
RADEON_IMPORTANT = 1, /* Important warnings and messages */
RADEON_NORMAL = 2, /* Normal log messages useful for debugging */
RADEON_VERBOSE = 3, /* Extra details for debugging */
RADEON_TRACE = 4 /* Log about everything that happens */
} radeon_debug_level_t;
/**
* Compile-time option to change the level of debugging compiled into the
* DRI driver. Selecting the critical level is not recommended: the
* performance gains will be minimal, and you will lose a lot of important
* warnings in case of errors.
*/
#ifndef RADEON_DEBUG_LEVEL
# ifdef DEBUG
# define RADEON_DEBUG_LEVEL RADEON_TRACE
# else
# define RADEON_DEBUG_LEVEL RADEON_VERBOSE
# endif
#endif
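/* Hedged example, not in the original header: the compile-time level can
 * be overridden from the build system, e.g.
 *
 *   cc -DRADEON_DEBUG_LEVEL=RADEON_NORMAL ...
 *
 * which turns every radeon_print() call above RADEON_NORMAL into dead
 * code the compiler can eliminate.
 */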
typedef enum radeon_debug_types {
RADEON_TEXTURE = 0x00001,
RADEON_STATE = 0x00002,
RADEON_IOCTL = 0x00004,
RADEON_RENDER = 0x00008,
RADEON_SWRENDER = 0x00010,
RADEON_FALLBACKS = 0x00020,
RADEON_VFMT = 0x00040,
RADEON_SHADER = 0x00080,
RADEON_CS = 0x00100,
RADEON_DRI = 0x00200,
RADEON_DMA = 0x00400,
RADEON_SANITY = 0x00800,
RADEON_SYNC = 0x01000,
RADEON_PIXEL = 0x02000,
RADEON_MEMORY = 0x04000,
RADEON_VERTS = 0x08000,
RADEON_GENERAL = 0x10000 /* Used for errors and warnings */
} radeon_debug_type_t;
#define RADEON_MAX_INDENT 5
struct radeon_debug {
size_t indent_depth;
char indent[RADEON_MAX_INDENT];
};
/**
* Compatibility layer for old debug code
**/
#if defined(RADEON_R200)
extern radeon_debug_type_t r200_enabled_debug_types;
#define RADEON_DEBUG r200_enabled_debug_types
#elif defined(RADEON_R100)
extern radeon_debug_type_t r100_enabled_debug_types;
#define RADEON_DEBUG r100_enabled_debug_types
#else
#error "Neither RADEON_R100 nor RADEON_R200 are defined."
#endif
static inline int radeon_is_debug_enabled(const radeon_debug_type_t type,
const radeon_debug_level_t level)
{
return RADEON_DEBUG_LEVEL >= level
&& (type & RADEON_DEBUG);
}
extern void _radeon_print(const radeon_debug_type_t type,
const radeon_debug_level_t level,
const char* message,
...) PRINTFLIKE(3, 4);
/**
Print out a debug message if the channel specified by type is enabled
and the compile-time debugging level is at least as high as the level parameter
*/
#define radeon_print(type, level, ...) do { \
const radeon_debug_level_t _debug_level = (level); \
const radeon_debug_type_t _debug_type = (type); \
/* Compile out if level of message is too high */ \
if (radeon_is_debug_enabled(type, level)) { \
_radeon_print(_debug_type, _debug_level, \
__VA_ARGS__); \
} \
} while(0)
/**
* printf style function for writing error messages.
*/
#define radeon_error(...) do { \
radeon_print(RADEON_GENERAL, RADEON_CRITICAL, \
__VA_ARGS__); \
} while(0)
/**
* printf style function for writing warnings.
*/
#define radeon_warning(...) do { \
radeon_print(RADEON_GENERAL, RADEON_IMPORTANT, \
__VA_ARGS__); \
} while(0)
extern void radeon_init_debug(void);
extern void _radeon_debug_add_indent(void);
extern void _radeon_debug_remove_indent(void);
static inline void radeon_debug_add_indent(void)
{
if (RADEON_DEBUG_LEVEL >= RADEON_VERBOSE) {
_radeon_debug_add_indent();
}
}
static inline void radeon_debug_remove_indent(void)
{
if (RADEON_DEBUG_LEVEL >= RADEON_VERBOSE) {
_radeon_debug_remove_indent();
}
}
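/*
 * A minimal usage sketch (hypothetical helper, not part of the driver):
 * assumes radeon_init_debug() has run and the RADEON_STATE channel is
 * enabled in RADEON_DEBUG.
 */
static inline void radeon_debug_usage_sketch(void)
{
   radeon_print(RADEON_STATE, RADEON_NORMAL, "emitting %d atoms\n", 4);
   radeon_debug_add_indent();
   radeon_print(RADEON_STATE, RADEON_VERBOSE, "atom %s\n", "ctx");
   radeon_debug_remove_indent();
}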
/* From http://gcc.gnu.org/onlinedocs/gcc-3.2.3/gcc/Variadic-Macros.html .
I suppose we could inline this and use a macro to fetch out __LINE__ and such
in case we run into trouble with other compilers ... GLUE!
*/
#define WARN_ONCE(...) do { \
static int __warn_once=1; \
if(__warn_once){ \
radeon_warning("*********************************WARN_ONCE*********************************\n"); \
radeon_warning("File %s function %s line %d\n", \
__FILE__, __func__, __LINE__); \
radeon_warning(__VA_ARGS__);\
radeon_warning("***************************************************************************\n"); \
__warn_once=0;\
} \
} while(0)
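/* A typical (hypothetical) call site; the banner and message are printed
 * at most once per process:
 *
 *    WARN_ONCE("unsupported depth format %d\n", format);
 */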
#endif

View File

@ -1,512 +0,0 @@
/**************************************************************************
Copyright (C) 2004 Nicolai Haehnle.
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
on the rights to use, copy, modify, merge, publish, distribute, sub
license, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
ATI, VA LINUX SYSTEMS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#include <errno.h>
#include "radeon_common.h"
#include "radeon_fog.h"
#include "util/simple_list.h"
#include "util/u_memory.h"
#if defined(USE_X86_ASM)
#define COPY_DWORDS( dst, src, nr ) \
do { \
int __tmp; \
__asm__ __volatile__( "rep ; movsl" \
: "=%c" (__tmp), "=D" (dst), "=S" (__tmp) \
: "0" (nr), \
"D" ((long)dst), \
"S" ((long)src) ); \
} while (0)
#else
#define COPY_DWORDS( dst, src, nr ) \
do { \
int j; \
for ( j = 0 ; j < nr ; j++ ) \
dst[j] = ((int *)src)[j]; \
dst += nr; \
} while (0)
#endif
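/*
 * A small self-test sketch (hypothetical, not part of the driver): both
 * variants of COPY_DWORDS copy `nr` dwords and leave `dst` advanced past
 * the copied region.
 */
static void copy_dwords_selftest(void)
{
   uint32_t src[4] = {1, 2, 3, 4};
   uint32_t buf[4];
   uint32_t *dst = buf;

   COPY_DWORDS(dst, src, 4);
   assert(dst == buf + 4);             /* dst advanced by 4 dwords */
   assert(buf[0] == 1 && buf[3] == 4); /* payload copied intact */
}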
void radeonEmitVec4(uint32_t *out, const GLvoid * data, int stride, int count)
{
int i;
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
__func__, count, stride, (void *)out, (void *)data);
if (stride == 4)
COPY_DWORDS(out, data, count);
else
for (i = 0; i < count; i++) {
out[0] = *(int *)data;
out++;
data += stride;
}
}
void radeonEmitVec8(uint32_t *out, const GLvoid * data, int stride, int count)
{
int i;
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
__func__, count, stride, (void *)out, (void *)data);
if (stride == 8)
COPY_DWORDS(out, data, count * 2);
else
for (i = 0; i < count; i++) {
out[0] = *(int *)data;
out[1] = *(int *)(data + 4);
out += 2;
data += stride;
}
}
void radeonEmitVec12(uint32_t *out, const GLvoid * data, int stride, int count)
{
int i;
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
__func__, count, stride, (void *)out, (void *)data);
if (stride == 12) {
COPY_DWORDS(out, data, count * 3);
}
else
for (i = 0; i < count; i++) {
out[0] = *(int *)data;
out[1] = *(int *)(data + 4);
out[2] = *(int *)(data + 8);
out += 3;
data += stride;
}
}
void radeonEmitVec16(uint32_t *out, const GLvoid * data, int stride, int count)
{
int i;
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
__func__, count, stride, (void *)out, (void *)data);
if (stride == 16)
COPY_DWORDS(out, data, count * 4);
else
for (i = 0; i < count; i++) {
out[0] = *(int *)data;
out[1] = *(int *)(data + 4);
out[2] = *(int *)(data + 8);
out[3] = *(int *)(data + 12);
out += 4;
data += stride;
}
}
void rcommon_emit_vector(struct gl_context * ctx, struct radeon_aos *aos,
const GLvoid * data, int size, int stride, int count)
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
uint32_t *out;
if (stride == 0) {
radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * 4, 32);
count = 1;
aos->stride = 0;
} else {
radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * count * 4, 32);
aos->stride = size;
}
aos->components = size;
aos->count = count;
radeon_bo_map(aos->bo, 1);
out = (uint32_t*)((char*)aos->bo->ptr + aos->offset);
switch (size) {
case 1: radeonEmitVec4(out, data, stride, count); break;
case 2: radeonEmitVec8(out, data, stride, count); break;
case 3: radeonEmitVec12(out, data, stride, count); break;
case 4: radeonEmitVec16(out, data, stride, count); break;
default:
assert(0);
break;
}
radeon_bo_unmap(aos->bo);
}
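/*
 * Sketch of a typical caller (hypothetical; real callers live in the
 * r100/r200 TCL and swtcl code): upload `n` 3-component float positions
 * as one vertex stream.
 */
static void example_emit_positions(struct gl_context *ctx,
                                   struct radeon_aos *aos,
                                   const GLfloat *pos, int n)
{
   /* 3 dwords per vertex, 12-byte stride between consecutive vertices */
   rcommon_emit_vector(ctx, aos, pos, 3, 12, n);
}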
void rcommon_emit_vecfog(struct gl_context *ctx, struct radeon_aos *aos,
GLvoid *data, int stride, int count)
{
int i;
float *out;
int size = 1;
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d\n",
__func__, count, stride);
if (stride == 0) {
radeonAllocDmaRegion( rmesa, &aos->bo, &aos->offset, size * 4, 32 );
count = 1;
aos->stride = 0;
} else {
radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * count * 4, 32);
aos->stride = size;
}
aos->components = size;
aos->count = count;
/* Emit the data */
radeon_bo_map(aos->bo, 1);
out = (float*)((char*)aos->bo->ptr + aos->offset);
for (i = 0; i < count; i++) {
out[0] = radeonComputeFogBlendFactor( ctx, *(GLfloat *)data );
out++;
data += stride;
}
radeon_bo_unmap(aos->bo);
}
void radeon_init_dma(radeonContextPtr rmesa)
{
make_empty_list(&rmesa->dma.free);
make_empty_list(&rmesa->dma.wait);
make_empty_list(&rmesa->dma.reserved);
rmesa->dma.minimum_size = MAX_DMA_BUF_SZ;
}
void radeonRefillCurrentDmaRegion(radeonContextPtr rmesa, int size)
{
struct radeon_dma_bo *dma_bo = NULL;
/* Set the minimum size to at least the requested size,
aligned up to the next 16 bytes. */
if (size > rmesa->dma.minimum_size)
rmesa->dma.minimum_size = (size + 15) & (~15);
radeon_print(RADEON_DMA, RADEON_NORMAL, "%s size %d minimum_size %zi\n",
__func__, size, rmesa->dma.minimum_size);
if (is_empty_list(&rmesa->dma.free)
|| last_elem(&rmesa->dma.free)->bo->size < size) {
dma_bo = CALLOC_STRUCT(radeon_dma_bo);
assert(dma_bo);
again_alloc:
dma_bo->bo = radeon_bo_open(rmesa->radeonScreen->bom,
0, rmesa->dma.minimum_size, 4,
RADEON_GEM_DOMAIN_GTT, 0);
if (!dma_bo->bo) {
rcommonFlushCmdBuf(rmesa, __func__);
goto again_alloc;
}
insert_at_head(&rmesa->dma.reserved, dma_bo);
} else {
/* We push and pop buffers from the end of the list so we can keep
a counter of unused buffers, and later free them from the
beginning of the list */
dma_bo = last_elem(&rmesa->dma.free);
remove_from_list(dma_bo);
insert_at_head(&rmesa->dma.reserved, dma_bo);
}
rmesa->dma.current_used = 0;
rmesa->dma.current_vertexptr = 0;
if (radeon_cs_space_check_with_bo(rmesa->cmdbuf.cs,
first_elem(&rmesa->dma.reserved)->bo,
RADEON_GEM_DOMAIN_GTT, 0))
fprintf(stderr,"failure to revalidate BOs - badness\n");
if (is_empty_list(&rmesa->dma.reserved)) {
/* The command buffer has been flushed in radeon_revalidate_bos */
goto again_alloc;
}
radeon_bo_map(first_elem(&rmesa->dma.reserved)->bo, 1);
}
/* Allocates a region from rmesa->dma.current. If there isn't enough
* space in current, grab a new buffer (and discard what was left of current)
*/
void radeonAllocDmaRegion(radeonContextPtr rmesa,
struct radeon_bo **pbo, int *poffset,
int bytes, int alignment)
{
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s %d\n", __func__, bytes);
if (rmesa->dma.flush)
rmesa->dma.flush(&rmesa->glCtx);
assert(rmesa->dma.current_used == rmesa->dma.current_vertexptr);
alignment--;
rmesa->dma.current_used = (rmesa->dma.current_used + alignment) & ~alignment;
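/* e.g. alignment = 32 on entry: current_used is rounded up to the next
 * 32-byte boundary via (used + 31) & ~31 */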
if (is_empty_list(&rmesa->dma.reserved)
|| rmesa->dma.current_used + bytes > first_elem(&rmesa->dma.reserved)->bo->size)
radeonRefillCurrentDmaRegion(rmesa, bytes);
*poffset = rmesa->dma.current_used;
*pbo = first_elem(&rmesa->dma.reserved)->bo;
radeon_bo_ref(*pbo);
/* Always align to at least 16 bytes */
rmesa->dma.current_used = (rmesa->dma.current_used + bytes + 15) & ~15;
rmesa->dma.current_vertexptr = rmesa->dma.current_used;
assert(rmesa->dma.current_used <= first_elem(&rmesa->dma.reserved)->bo->size);
}
void radeonFreeDmaRegions(radeonContextPtr rmesa)
{
struct radeon_dma_bo *dma_bo;
struct radeon_dma_bo *temp;
if (RADEON_DEBUG & RADEON_DMA)
fprintf(stderr, "%s\n", __func__);
foreach_s(dma_bo, temp, &rmesa->dma.free) {
remove_from_list(dma_bo);
radeon_bo_unref(dma_bo->bo);
free(dma_bo);
}
foreach_s(dma_bo, temp, &rmesa->dma.wait) {
remove_from_list(dma_bo);
radeon_bo_unref(dma_bo->bo);
free(dma_bo);
}
foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
remove_from_list(dma_bo);
radeon_bo_unref(dma_bo->bo);
free(dma_bo);
}
}
void radeonReturnDmaRegion(radeonContextPtr rmesa, int return_bytes)
{
if (is_empty_list(&rmesa->dma.reserved))
return;
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s %d\n", __func__, return_bytes);
rmesa->dma.current_used -= return_bytes;
rmesa->dma.current_vertexptr = rmesa->dma.current_used;
}
static int radeon_bo_is_idle(struct radeon_bo* bo)
{
uint32_t domain;
int ret = radeon_bo_is_busy(bo, &domain);
if (ret == -EINVAL) {
WARN_ONCE("Your libdrm or kernel doesn't have support for busy query.\n"
"This may cause small performance drop for you.\n");
}
return ret != -EBUSY;
}
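/*
 * BO lifecycle, as implemented below:
 *    reserved -> wait -> free -> reuse or destruction
 * reserved: mapped and currently being filled by the CPU;
 * wait:     submitted to the GPU, waiting for it to go idle;
 * free:     idle and reusable; destroyed after DMA_BO_FREE_TIME release
 *           cycles without being reused.
 */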
void radeonReleaseDmaRegions(radeonContextPtr rmesa)
{
struct radeon_dma_bo *dma_bo;
struct radeon_dma_bo *temp;
const int expire_at = ++rmesa->dma.free.expire_counter + DMA_BO_FREE_TIME;
const int time = rmesa->dma.free.expire_counter;
if (RADEON_DEBUG & RADEON_DMA) {
size_t free = 0,
wait = 0,
reserved = 0;
foreach(dma_bo, &rmesa->dma.free)
++free;
foreach(dma_bo, &rmesa->dma.wait)
++wait;
foreach(dma_bo, &rmesa->dma.reserved)
++reserved;
fprintf(stderr, "%s: free %zu, wait %zu, reserved %zu, minimum_size: %zu\n",
__func__, free, wait, reserved, rmesa->dma.minimum_size);
}
/* Move waiting BOs to the free list.
The wait list gives the GPU time to finish with the data before reuse */
foreach_s(dma_bo, temp, &rmesa->dma.wait) {
if (dma_bo->expire_counter == time) {
WARN_ONCE("Leaking dma buffer object!\n");
radeon_bo_unref(dma_bo->bo);
remove_from_list(dma_bo);
free(dma_bo);
continue;
}
/* free objects that are too small to be used because of a large request */
if (dma_bo->bo->size < rmesa->dma.minimum_size) {
radeon_bo_unref(dma_bo->bo);
remove_from_list(dma_bo);
free(dma_bo);
continue;
}
if (!radeon_bo_is_idle(dma_bo->bo)) {
break;
}
remove_from_list(dma_bo);
dma_bo->expire_counter = expire_at;
insert_at_tail(&rmesa->dma.free, dma_bo);
}
/* move reserved to wait list */
foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
radeon_bo_unmap(dma_bo->bo);
/* free objects that are too small to be used because of a large request */
if (dma_bo->bo->size < rmesa->dma.minimum_size) {
radeon_bo_unref(dma_bo->bo);
remove_from_list(dma_bo);
free(dma_bo);
continue;
}
remove_from_list(dma_bo);
dma_bo->expire_counter = expire_at;
insert_at_tail(&rmesa->dma.wait, dma_bo);
}
/* free bos that have been unused for some time */
foreach_s(dma_bo, temp, &rmesa->dma.free) {
if (dma_bo->expire_counter != time)
break;
remove_from_list(dma_bo);
radeon_bo_unref(dma_bo->bo);
free(dma_bo);
}
}
/* Flush vertices in the current dma region.
*/
void rcommon_flush_last_swtcl_prim( struct gl_context *ctx )
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
struct radeon_dma *dma = &rmesa->dma;
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __func__);
dma->flush = NULL;
radeon_bo_unmap(rmesa->swtcl.bo);
if (!is_empty_list(&dma->reserved)) {
GLuint current_offset = dma->current_used;
assert (dma->current_used +
rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
dma->current_vertexptr);
if (dma->current_used != dma->current_vertexptr) {
dma->current_used = dma->current_vertexptr;
rmesa->vtbl.swtcl_flush(ctx, current_offset);
}
rmesa->swtcl.numverts = 0;
}
radeon_bo_unref(rmesa->swtcl.bo);
rmesa->swtcl.bo = NULL;
}
/* Alloc space in the current dma region.
*/
void *
rcommonAllocDmaLowVerts( radeonContextPtr rmesa, int nverts, int vsize )
{
GLuint bytes = vsize * nverts;
void *head;
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __func__);
if(is_empty_list(&rmesa->dma.reserved)
||rmesa->dma.current_vertexptr + bytes > first_elem(&rmesa->dma.reserved)->bo->size) {
if (rmesa->dma.flush) {
rmesa->dma.flush(&rmesa->glCtx);
}
radeonRefillCurrentDmaRegion(rmesa, bytes);
return NULL;
}
if (!rmesa->dma.flush) {
/* if the cmdbuf flush cleared it, re-arm the DMA flush callback */
rmesa->glCtx.Driver.NeedFlush |= FLUSH_STORED_VERTICES;
rmesa->dma.flush = rcommon_flush_last_swtcl_prim;
}
assert( vsize == rmesa->swtcl.vertex_size * 4 );
assert( rmesa->dma.flush == rcommon_flush_last_swtcl_prim );
assert( rmesa->dma.current_used +
rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
rmesa->dma.current_vertexptr );
if (!rmesa->swtcl.bo) {
rmesa->swtcl.bo = first_elem(&rmesa->dma.reserved)->bo;
radeon_bo_ref(rmesa->swtcl.bo);
radeon_bo_map(rmesa->swtcl.bo, 1);
}
head = (rmesa->swtcl.bo->ptr + rmesa->dma.current_vertexptr);
rmesa->dma.current_vertexptr += bytes;
rmesa->swtcl.numverts += nverts;
return head;
}
void radeonReleaseArrays( struct gl_context *ctx, GLuint newinputs )
{
radeonContextPtr radeon = RADEON_CONTEXT( ctx );
int i;
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __func__);
if (radeon->dma.flush) {
radeon->dma.flush(&radeon->glCtx);
}
for (i = 0; i < radeon->tcl.aos_count; i++) {
if (radeon->tcl.aos[i].bo) {
radeon_bo_unref(radeon->tcl.aos[i].bo);
radeon->tcl.aos[i].bo = NULL;
}
}
}

View File

@ -1,60 +0,0 @@
/**************************************************************************
Copyright (C) 2004 Nicolai Haehnle.
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
on the rights to use, copy, modify, merge, publish, distribute, sub
license, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
ATI, VA LINUX SYSTEMS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#ifndef RADEON_DMA_H
#define RADEON_DMA_H
void radeonEmitVec4(uint32_t *out, const GLvoid * data, int stride, int count);
void radeonEmitVec8(uint32_t *out, const GLvoid * data, int stride, int count);
void radeonEmitVec12(uint32_t *out, const GLvoid * data, int stride, int count);
void radeonEmitVec16(uint32_t *out, const GLvoid * data, int stride, int count);
void rcommon_emit_vector(struct gl_context * ctx, struct radeon_aos *aos,
const GLvoid * data, int size, int stride, int count);
void rcommon_emit_vecfog(struct gl_context *ctx, struct radeon_aos *aos,
GLvoid *data, int stride, int count);
void radeonReturnDmaRegion(radeonContextPtr rmesa, int return_bytes);
void radeonRefillCurrentDmaRegion(radeonContextPtr rmesa, int size);
void radeon_init_dma(radeonContextPtr rmesa);
void radeonReturnDmaRegion(radeonContextPtr rmesa, int return_bytes);
void radeonAllocDmaRegion(radeonContextPtr rmesa,
struct radeon_bo **pbo, int *poffset,
int bytes, int alignment);
void radeonReleaseDmaRegions(radeonContextPtr rmesa);
void rcommon_flush_last_swtcl_prim(struct gl_context *ctx);
void *rcommonAllocDmaLowVerts(radeonContextPtr rmesa, int nverts, int vsize);
void radeonFreeDmaRegions(radeonContextPtr rmesa);
void radeonReleaseArrays( struct gl_context *ctx, GLuint newinputs );
#endif

View File

@ -1,893 +0,0 @@
/**************************************************************************
*
* Copyright 2008 Red Hat Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/enums.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"
#include "util/u_memory.h"
#include "radeon_common.h"
#include "radeon_mipmap_tree.h"
#define FILE_DEBUG_FLAG RADEON_TEXTURE
#define DBG(...) do { \
if (RADEON_DEBUG & FILE_DEBUG_FLAG) \
printf(__VA_ARGS__); \
} while(0)
static void
radeon_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s(rb %p, rrb %p) \n",
__func__, rb, rrb);
assert(rrb);
if (rrb && rrb->bo) {
radeon_bo_unref(rrb->bo);
}
_mesa_delete_renderbuffer(ctx, rb);
}
#if defined(RADEON_R100)
static GLuint get_depth_z32(const struct radeon_renderbuffer * rrb,
GLint x, GLint y)
{
GLuint ba, address = 0;
ba = (y >> 4) * (rrb->pitch >> 6) + (x >> 4);
address |= (x & 0x7) << 2;
address |= (y & 0x3) << 5;
address |= (((x & 0x10) >> 2) ^ (y & 0x4)) << 5;
address |= (ba & 3) << 8;
address |= (y & 0x8) << 7;
address |= (((x & 0x8) << 1) ^ (y & 0x10)) << 7;
address |= (ba & ~0x3) << 10;
return address;
}
static GLuint get_depth_z16(const struct radeon_renderbuffer * rrb,
GLint x, GLint y)
{
GLuint ba, address = 0; /* a[0] = 0 */
ba = (y / 16) * (rrb->pitch >> 6) + (x / 32);
address |= (x & 0x7) << 1; /* a[1..3] = x[0..2] */
address |= (y & 0x7) << 4; /* a[4..6] = y[0..2] */
address |= (x & 0x8) << 4; /* a[7] = x[3] */
address |= (ba & 0x3) << 8; /* a[8..9] = ba[0..1] */
address |= (y & 0x8) << 7; /* a[10] = y[3] */
address |= ((x & 0x10) ^ (y & 0x10)) << 7;/* a[11] = x[4] ^ y[4] */
address |= (ba & ~0x3) << 10; /* a[12..] = ba[2..] */
return address;
}
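/* Worked example: pixel (x, y) = (5, 3) in the first macrotile (ba = 0)
 * maps to byte address (5 << 1) | (3 << 4) = 0x3a within the buffer. */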
#endif
#if defined(RADEON_R200)
static GLuint get_depth_z32(const struct radeon_renderbuffer * rrb,
GLint x, GLint y)
{
GLuint offset;
GLuint b;
offset = 0;
b = (((y & 0x7ff) >> 4) * (rrb->pitch >> 7) + (x >> 5));
offset += (b >> 1) << 12;
offset += (((rrb->pitch >> 7) & 0x1) ? (b & 0x1) : ((b & 0x1) ^ ((y >> 4) & 0x1))) << 11;
offset += ((y >> 2) & 0x3) << 9;
offset += ((x >> 2) & 0x1) << 8;
offset += ((x >> 3) & 0x3) << 6;
offset += ((y >> 1) & 0x1) << 5;
offset += ((x >> 1) & 0x1) << 4;
offset += (y & 0x1) << 3;
offset += (x & 0x1) << 2;
return offset;
}
static GLuint get_depth_z16(const struct radeon_renderbuffer *rrb,
GLint x, GLint y)
{
GLuint offset;
GLuint b;
offset = 0;
b = (((y >> 4) * (rrb->pitch >> 7) + (x >> 6)));
offset += (b >> 1) << 12;
offset += (((rrb->pitch >> 7) & 0x1) ? (b & 0x1) : ((b & 0x1) ^ ((y >> 4) & 0x1))) << 11;
offset += ((y >> 2) & 0x3) << 9;
offset += ((x >> 3) & 0x1) << 8;
offset += ((x >> 4) & 0x3) << 6;
offset += ((x >> 2) & 0x1) << 5;
offset += ((y >> 1) & 0x1) << 4;
offset += ((x >> 1) & 0x1) << 3;
offset += (y & 0x1) << 2;
offset += (x & 0x1) << 1;
return offset;
}
#endif
static void
radeon_map_renderbuffer_s8z24(struct gl_renderbuffer *rb,
GLuint x, GLuint y, GLuint w, GLuint h,
GLbitfield mode,
GLubyte **out_map,
GLint *out_stride)
{
struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
uint32_t *untiled_s8z24_map, *tiled_s8z24_map;
int ret;
int y_flip = (rb->Name == 0) ? -1 : 1;
int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;
uint32_t pitch = w * rrb->cpp;
rrb->map_pitch = pitch;
rrb->map_buffer = malloc(w * h * 4);
ret = radeon_bo_map(rrb->bo, !!(mode & GL_MAP_WRITE_BIT));
assert(!ret);
(void) ret;
untiled_s8z24_map = rrb->map_buffer;
tiled_s8z24_map = rrb->bo->ptr;
for (uint32_t pix_y = 0; pix_y < h; ++ pix_y) {
for (uint32_t pix_x = 0; pix_x < w; ++pix_x) {
uint32_t flipped_y = y_flip * (int32_t)(y + pix_y) + y_bias;
uint32_t src_offset = get_depth_z32(rrb, x + pix_x, flipped_y);
uint32_t dst_offset = pix_y * rrb->map_pitch + pix_x * rrb->cpp;
untiled_s8z24_map[dst_offset/4] = tiled_s8z24_map[src_offset/4];
}
}
radeon_bo_unmap(rrb->bo);
*out_map = rrb->map_buffer;
*out_stride = rrb->map_pitch;
}
static void
radeon_map_renderbuffer_z16(struct gl_renderbuffer *rb,
GLuint x, GLuint y, GLuint w, GLuint h,
GLbitfield mode,
GLubyte **out_map,
GLint *out_stride)
{
struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
uint16_t *untiled_z16_map, *tiled_z16_map;
int ret;
int y_flip = (rb->Name == 0) ? -1 : 1;
int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;
uint32_t pitch = w * rrb->cpp;
rrb->map_pitch = pitch;
rrb->map_buffer = malloc(w * h * 2);
ret = radeon_bo_map(rrb->bo, !!(mode & GL_MAP_WRITE_BIT));
assert(!ret);
(void) ret;
untiled_z16_map = rrb->map_buffer;
tiled_z16_map = rrb->bo->ptr;
for (uint32_t pix_y = 0; pix_y < h; ++ pix_y) {
for (uint32_t pix_x = 0; pix_x < w; ++pix_x) {
uint32_t flipped_y = y_flip * (int32_t)(y + pix_y) + y_bias;
uint32_t src_offset = get_depth_z16(rrb, x + pix_x, flipped_y);
uint32_t dst_offset = pix_y * rrb->map_pitch + pix_x * rrb->cpp;
untiled_z16_map[dst_offset/2] = tiled_z16_map[src_offset/2];
}
}
radeon_bo_unmap(rrb->bo);
*out_map = rrb->map_buffer;
*out_stride = rrb->map_pitch;
}
static void
radeon_map_renderbuffer(struct gl_context *ctx,
struct gl_renderbuffer *rb,
GLuint x, GLuint y, GLuint w, GLuint h,
GLbitfield mode,
GLubyte **out_map,
GLint *out_stride,
bool flip_y)
{
struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
GLubyte *map;
GLboolean ok;
int stride, flip_stride;
int ret;
int src_x, src_y;
/* driver does not support GL_FRAMEBUFFER_FLIP_Y_MESA */
assert((rb->Name == 0) == flip_y);
if (!rrb || !rrb->bo) {
*out_map = NULL;
*out_stride = 0;
return;
}
rrb->map_mode = mode;
rrb->map_x = x;
rrb->map_y = y;
rrb->map_w = w;
rrb->map_h = h;
rrb->map_pitch = rrb->pitch;
ok = rmesa->vtbl.check_blit(rb->Format, rrb->pitch / rrb->cpp);
if (ok) {
if (rb->Name) {
src_x = x;
src_y = y;
} else {
src_x = x;
src_y = rrb->base.Base.Height - y - h;
}
/* Make a temporary buffer and blit the current contents of the renderbuffer
* out to it. This gives us linear access to the buffer, instead of having
* to do detiling in software.
*/
rrb->map_pitch = rrb->pitch;
assert(!rrb->map_bo);
rrb->map_bo = radeon_bo_open(rmesa->radeonScreen->bom, 0,
rrb->map_pitch * h, 4,
RADEON_GEM_DOMAIN_GTT, 0);
ok = rmesa->vtbl.blit(ctx, rrb->bo, rrb->draw_offset,
rb->Format, rrb->pitch / rrb->cpp,
rb->Width, rb->Height,
src_x, src_y,
rrb->map_bo, 0,
rb->Format, rrb->map_pitch / rrb->cpp,
w, h,
0, 0,
w, h,
GL_FALSE);
assert(ok);
ret = radeon_bo_map(rrb->map_bo, !!(mode & GL_MAP_WRITE_BIT));
assert(!ret);
map = rrb->map_bo->ptr;
if (rb->Name) {
*out_map = map;
*out_stride = rrb->map_pitch;
} else {
*out_map = map + (h - 1) * rrb->map_pitch;
*out_stride = -rrb->map_pitch;
}
return;
}
/* sw fallback flush stuff */
if (radeon_bo_is_referenced_by_cs(rrb->bo, rmesa->cmdbuf.cs)) {
radeon_firevertices(rmesa);
}
if ((rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_DEPTH_ALWAYS_TILED) && !rrb->has_surface) {
if (rb->Format == MESA_FORMAT_Z24_UNORM_S8_UINT || rb->Format == MESA_FORMAT_Z24_UNORM_X8_UINT) {
radeon_map_renderbuffer_s8z24(rb, x, y, w, h,
mode, out_map, out_stride);
return;
}
if (rb->Format == MESA_FORMAT_Z_UNORM16) {
radeon_map_renderbuffer_z16(rb, x, y, w, h,
mode, out_map, out_stride);
return;
}
}
ret = radeon_bo_map(rrb->bo, !!(mode & GL_MAP_WRITE_BIT));
assert(!ret);
(void) ret;
map = rrb->bo->ptr;
stride = rrb->map_pitch;
if (rb->Name == 0) {
y = rb->Height - 1 - y;
flip_stride = -stride;
} else {
flip_stride = stride;
map += rrb->draw_offset;
}
map += x * rrb->cpp;
map += (int)y * stride;
*out_map = map;
*out_stride = flip_stride;
}
static void
radeon_unmap_renderbuffer_s8z24(struct gl_context *ctx,
struct gl_renderbuffer *rb)
{
struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
if (!rrb->map_buffer)
return;
if (rrb->map_mode & GL_MAP_WRITE_BIT) {
uint32_t *untiled_s8z24_map = rrb->map_buffer;
uint32_t *tiled_s8z24_map;
int y_flip = (rb->Name == 0) ? -1 : 1;
int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;
radeon_bo_map(rrb->bo, 1);
tiled_s8z24_map = rrb->bo->ptr;
for (uint32_t pix_y = 0; pix_y < rrb->map_h; pix_y++) {
for (uint32_t pix_x = 0; pix_x < rrb->map_w; pix_x++) {
uint32_t flipped_y = y_flip * (int32_t)(pix_y + rrb->map_y) + y_bias;
uint32_t dst_offset = get_depth_z32(rrb, rrb->map_x + pix_x, flipped_y);
uint32_t src_offset = pix_y * rrb->map_pitch + pix_x * rrb->cpp;
tiled_s8z24_map[dst_offset/4] = untiled_s8z24_map[src_offset/4];
}
}
radeon_bo_unmap(rrb->bo);
}
free(rrb->map_buffer);
rrb->map_buffer = NULL;
}
static void
radeon_unmap_renderbuffer_z16(struct gl_context *ctx,
struct gl_renderbuffer *rb)
{
struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
if (!rrb->map_buffer)
return;
if (rrb->map_mode & GL_MAP_WRITE_BIT) {
uint16_t *untiled_z16_map = rrb->map_buffer;
uint16_t *tiled_z16_map;
int y_flip = (rb->Name == 0) ? -1 : 1;
int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;
radeon_bo_map(rrb->bo, 1);
tiled_z16_map = rrb->bo->ptr;
for (uint32_t pix_y = 0; pix_y < rrb->map_h; pix_y++) {
for (uint32_t pix_x = 0; pix_x < rrb->map_w; pix_x++) {
uint32_t flipped_y = y_flip * (int32_t)(pix_y + rrb->map_y) + y_bias;
uint32_t dst_offset = get_depth_z16(rrb, rrb->map_x + pix_x, flipped_y);
uint32_t src_offset = pix_y * rrb->map_pitch + pix_x * rrb->cpp;
tiled_z16_map[dst_offset/2] = untiled_z16_map[src_offset/2];
}
}
radeon_bo_unmap(rrb->bo);
}
free(rrb->map_buffer);
rrb->map_buffer = NULL;
}
static void
radeon_unmap_renderbuffer(struct gl_context *ctx,
struct gl_renderbuffer *rb)
{
struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
if ((rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_DEPTH_ALWAYS_TILED) && !rrb->has_surface) {
if (rb->Format == MESA_FORMAT_Z24_UNORM_S8_UINT || rb->Format == MESA_FORMAT_Z24_UNORM_X8_UINT) {
radeon_unmap_renderbuffer_s8z24(ctx, rb);
return;
}
if (rb->Format == MESA_FORMAT_Z_UNORM16) {
radeon_unmap_renderbuffer_z16(ctx, rb);
return;
}
}
if (!rrb->map_bo) {
if (rrb->bo)
radeon_bo_unmap(rrb->bo);
return;
}
radeon_bo_unmap(rrb->map_bo);
if (rrb->map_mode & GL_MAP_WRITE_BIT) {
GLboolean ok;
ok = rmesa->vtbl.blit(ctx, rrb->map_bo, 0,
rb->Format, rrb->map_pitch / rrb->cpp,
rrb->map_w, rrb->map_h,
0, 0,
rrb->bo, rrb->draw_offset,
rb->Format, rrb->pitch / rrb->cpp,
rb->Width, rb->Height,
rrb->map_x, rrb->map_y,
rrb->map_w, rrb->map_h,
GL_FALSE);
assert(ok);
(void) ok;
}
radeon_bo_unref(rrb->map_bo);
rrb->map_bo = NULL;
}
/**
* Called via glRenderbufferStorageEXT() to set the format and allocate
* storage for a user-created renderbuffer.
*/
static GLboolean
radeon_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
GLenum internalFormat,
GLuint width, GLuint height)
{
struct radeon_context *radeon = RADEON_CONTEXT(ctx);
struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
uint32_t size, pitch;
int cpp;
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s(%p, rb %p) \n",
__func__, ctx, rb);
assert(rb->Name != 0);
switch (internalFormat) {
case GL_R3_G3_B2:
case GL_RGB4:
case GL_RGB5:
rb->Format = _radeon_texformat_rgb565;
cpp = 2;
break;
case GL_RGB:
case GL_RGB8:
case GL_RGB10:
case GL_RGB12:
case GL_RGB16:
rb->Format = _radeon_texformat_argb8888;
cpp = 4;
break;
case GL_RGBA:
case GL_RGBA2:
case GL_RGBA4:
case GL_RGB5_A1:
case GL_RGBA8:
case GL_RGB10_A2:
case GL_RGBA12:
case GL_RGBA16:
rb->Format = _radeon_texformat_argb8888;
cpp = 4;
break;
case GL_STENCIL_INDEX:
case GL_STENCIL_INDEX1_EXT:
case GL_STENCIL_INDEX4_EXT:
case GL_STENCIL_INDEX8_EXT:
case GL_STENCIL_INDEX16_EXT:
/* alloc a depth+stencil buffer */
rb->Format = MESA_FORMAT_Z24_UNORM_S8_UINT;
cpp = 4;
break;
case GL_DEPTH_COMPONENT16:
rb->Format = MESA_FORMAT_Z_UNORM16;
cpp = 2;
break;
case GL_DEPTH_COMPONENT:
case GL_DEPTH_COMPONENT24:
case GL_DEPTH_COMPONENT32:
rb->Format = MESA_FORMAT_Z24_UNORM_X8_UINT;
cpp = 4;
break;
case GL_DEPTH_STENCIL_EXT:
case GL_DEPTH24_STENCIL8_EXT:
rb->Format = MESA_FORMAT_Z24_UNORM_S8_UINT;
cpp = 4;
break;
default:
_mesa_problem(ctx,
"Unexpected format in radeon_alloc_renderbuffer_storage");
return GL_FALSE;
}
rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
if (ctx->Driver.Flush)
ctx->Driver.Flush(ctx, 0); /* +r6/r7 */
if (rrb->bo)
radeon_bo_unref(rrb->bo);
pitch = ((cpp * width + 63) & ~63) / cpp;
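/* e.g. cpp = 2, width = 100: 200 bytes rounds up to 256 bytes,
 * i.e. a pitch of 128 pixels */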
if (RADEON_DEBUG & RADEON_MEMORY)
fprintf(stderr,"Allocating %d x %d radeon RBO (pitch %d)\n", width,
height, pitch);
size = pitch * height * cpp;
rrb->pitch = pitch * cpp;
rrb->cpp = cpp;
rrb->bo = radeon_bo_open(radeon->radeonScreen->bom,
0,
size,
0,
RADEON_GEM_DOMAIN_VRAM,
0);
rb->Width = width;
rb->Height = height;
return GL_TRUE;
}
static void
radeon_image_target_renderbuffer_storage(struct gl_context *ctx,
struct gl_renderbuffer *rb,
void *image_handle)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
struct radeon_renderbuffer *rrb;
__DRIscreen *screen;
__DRIimage *image;
screen = radeon->radeonScreen->driScreen;
image = screen->dri2.image->lookupEGLImage(screen, image_handle,
screen->loaderPrivate);
if (image == NULL)
return;
rrb = radeon_renderbuffer(rb);
if (ctx->Driver.Flush)
ctx->Driver.Flush(ctx, 0); /* +r6/r7 */
if (rrb->bo)
radeon_bo_unref(rrb->bo);
rrb->bo = image->bo;
radeon_bo_ref(rrb->bo);
fprintf(stderr, "image->bo: %p, name: %d, rbs: w %d -> p %d\n", image->bo, image->bo->handle,
image->width, image->pitch);
rrb->cpp = image->cpp;
rrb->pitch = image->pitch * image->cpp;
rb->Format = image->format;
rb->InternalFormat = image->internal_format;
rb->Width = image->width;
rb->Height = image->height;
rb->Format = image->format;
rb->_BaseFormat = _mesa_base_fbo_format(&radeon->glCtx,
image->internal_format);
rb->NeedsFinishRenderTexture = GL_TRUE;
}
/**
* Called for each hardware renderbuffer when a _window_ is resized.
* Just update fields.
* Not used for user-created renderbuffers!
*/
static GLboolean
radeon_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
GLenum internalFormat, GLuint width, GLuint height)
{
assert(rb->Name == 0);
rb->Width = width;
rb->Height = height;
rb->InternalFormat = internalFormat;
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s(%p, rb %p) \n",
__func__, ctx, rb);
return GL_TRUE;
}
/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
radeon_nop_alloc_storage(struct gl_context * ctx,
UNUSED struct gl_renderbuffer *rb,
UNUSED GLenum internalFormat,
UNUSED GLuint width,
UNUSED GLuint height)
{
_mesa_problem(ctx, "radeon_op_alloc_storage should never be called.");
return GL_FALSE;
}
/**
* Create a renderbuffer for a window's color, depth and/or stencil buffer.
* Not used for user-created renderbuffers.
*/
struct radeon_renderbuffer *
radeon_create_renderbuffer(mesa_format format, __DRIdrawable *driDrawPriv)
{
struct radeon_renderbuffer *rrb;
struct gl_renderbuffer *rb;
rrb = CALLOC_STRUCT(radeon_renderbuffer);
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s( rrb %p ) \n",
__func__, rrb);
if (!rrb)
return NULL;
rb = &rrb->base.Base;
_mesa_init_renderbuffer(rb, 0);
rb->ClassID = RADEON_RB_CLASS;
rb->Format = format;
rb->_BaseFormat = _mesa_get_format_base_format(format);
rb->InternalFormat = _mesa_get_format_base_format(format);
rrb->dPriv = driDrawPriv;
rb->Delete = radeon_delete_renderbuffer;
rb->AllocStorage = radeon_alloc_window_storage;
rrb->bo = NULL;
return rrb;
}
static struct gl_renderbuffer *
radeon_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
struct radeon_renderbuffer *rrb;
struct gl_renderbuffer *rb;
rrb = CALLOC_STRUCT(radeon_renderbuffer);
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s(%p, rrb %p) \n",
__func__, ctx, rrb);
if (!rrb)
return NULL;
rb = &rrb->base.Base;
_mesa_init_renderbuffer(rb, name);
rb->ClassID = RADEON_RB_CLASS;
rb->Delete = radeon_delete_renderbuffer;
rb->AllocStorage = radeon_alloc_renderbuffer_storage;
return rb;
}
static void
radeon_bind_framebuffer(struct gl_context * ctx, GLenum target,
struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
{
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s(%p, fb %p, target %s) \n",
__func__, ctx, fb,
_mesa_enum_to_string(target));
if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
radeon_draw_buffer(ctx, fb);
}
else {
/* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
}
}
static void
radeon_framebuffer_renderbuffer(struct gl_context * ctx,
struct gl_framebuffer *fb,
GLenum attachment, struct gl_renderbuffer *rb)
{
if (ctx->Driver.Flush)
ctx->Driver.Flush(ctx, 0); /* +r6/r7 */
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s(%p, fb %p, rb %p) \n",
__func__, ctx, fb, rb);
_mesa_FramebufferRenderbuffer_sw(ctx, fb, attachment, rb);
radeon_draw_buffer(ctx, fb);
}
static GLboolean
radeon_update_wrapper(struct gl_context *ctx, struct radeon_renderbuffer *rrb,
struct gl_texture_image *texImage)
{
struct gl_renderbuffer *rb = &rrb->base.Base;
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s(%p, rrb %p, texImage %p, texFormat %s) \n",
__func__, ctx, rrb, texImage, _mesa_get_format_name(texImage->TexFormat));
rrb->cpp = _mesa_get_format_bytes(texImage->TexFormat);
rrb->pitch = texImage->Width * rrb->cpp;
rb->Format = texImage->TexFormat;
rb->InternalFormat = texImage->InternalFormat;
rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);
rb->Width = texImage->Width;
rb->Height = texImage->Height;
rb->Delete = radeon_delete_renderbuffer;
rb->AllocStorage = radeon_nop_alloc_storage;
return GL_TRUE;
}
static void
radeon_render_texture(struct gl_context * ctx,
struct gl_framebuffer *fb,
struct gl_renderbuffer_attachment *att)
{
struct gl_renderbuffer *rb = att->Renderbuffer;
struct gl_texture_image *newImage = rb->TexImage;
struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
radeon_texture_image *radeon_image;
GLuint imageOffset;
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s(%p, fb %p, rrb %p, att %p)\n",
__func__, ctx, fb, rrb, att);
(void) fb;
assert(newImage);
radeon_image = (radeon_texture_image *)newImage;
if (!radeon_image->mt) {
/* Fallback on drawing to a texture without a miptree.
*/
_swrast_render_texture(ctx, fb, att);
return;
}
if (!radeon_update_wrapper(ctx, rrb, newImage)) {
_swrast_render_texture(ctx, fb, att);
return;
}
DBG("Begin render texture tex=%u w=%d h=%d refcount=%d\n",
att->Texture->Name, newImage->Width, newImage->Height,
rb->RefCount);
/* point the renderbuffer's region to the texture image region */
if (rrb->bo != radeon_image->mt->bo) {
if (rrb->bo)
radeon_bo_unref(rrb->bo);
rrb->bo = radeon_image->mt->bo;
radeon_bo_ref(rrb->bo);
}
/* compute offset of the particular 2D image within the texture region */
imageOffset = radeon_miptree_image_offset(radeon_image->mt,
att->CubeMapFace,
att->TextureLevel);
if (att->Texture->Target == GL_TEXTURE_3D) {
imageOffset += radeon_image->mt->levels[att->TextureLevel].rowstride *
radeon_image->mt->levels[att->TextureLevel].height *
att->Zoffset;
}
/* store that offset in the region, along with the correct pitch for
* the image we are rendering to */
rrb->draw_offset = imageOffset;
rrb->pitch = radeon_image->mt->levels[att->TextureLevel].rowstride;
radeon_image->used_as_render_target = GL_TRUE;
/* update drawing region, etc */
radeon_draw_buffer(ctx, fb);
}
static void
radeon_finish_render_texture(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
struct gl_texture_image *image = rb->TexImage;
radeon_texture_image *radeon_image = (radeon_texture_image *)image;
if (radeon_image)
radeon_image->used_as_render_target = GL_FALSE;
if (ctx->Driver.Flush)
ctx->Driver.Flush(ctx, 0); /* +r6/r7 */
}
static void
radeon_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
mesa_format mesa_format;
int i;
for (i = -2; i < (GLint) ctx->Const.MaxColorAttachments; i++) {
struct gl_renderbuffer_attachment *att;
if (i == -2) {
att = &fb->Attachment[BUFFER_DEPTH];
} else if (i == -1) {
att = &fb->Attachment[BUFFER_STENCIL];
} else {
att = &fb->Attachment[BUFFER_COLOR0 + i];
}
if (att->Type == GL_TEXTURE) {
mesa_format = att->Renderbuffer->TexImage->TexFormat;
} else {
/* All renderbuffer formats are renderable, but not samplable */
continue;
}
if (!radeon->vtbl.is_format_renderable(mesa_format)){
fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s: HW doesn't support format %s as output format of attachment %d\n",
__func__, _mesa_get_format_name(mesa_format), i);
return;
}
}
}
void radeon_fbo_init(struct radeon_context *radeon)
{
radeon->glCtx.Driver.NewRenderbuffer = radeon_new_renderbuffer;
radeon->glCtx.Driver.MapRenderbuffer = radeon_map_renderbuffer;
radeon->glCtx.Driver.UnmapRenderbuffer = radeon_unmap_renderbuffer;
radeon->glCtx.Driver.BindFramebuffer = radeon_bind_framebuffer;
radeon->glCtx.Driver.FramebufferRenderbuffer = radeon_framebuffer_renderbuffer;
radeon->glCtx.Driver.RenderTexture = radeon_render_texture;
radeon->glCtx.Driver.FinishRenderTexture = radeon_finish_render_texture;
radeon->glCtx.Driver.ValidateFramebuffer = radeon_validate_framebuffer;
radeon->glCtx.Driver.BlitFramebuffer = _mesa_meta_and_swrast_BlitFramebuffer;
radeon->glCtx.Driver.EGLImageTargetRenderbufferStorage =
radeon_image_target_renderbuffer_storage;
}
void radeon_renderbuffer_set_bo(struct radeon_renderbuffer *rb,
struct radeon_bo *bo)
{
struct radeon_bo *old;
old = rb->bo;
rb->bo = bo;
radeon_bo_ref(bo);
if (old)
radeon_bo_unref(old);
}

View File

@ -1,126 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VMware, Inc.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keithw@vmware.com>
*/
#include "c99_math.h"
#include "main/glheader.h"
#include "main/context.h"
#include "main/mtypes.h"
#include "main/enums.h"
#include "main/macros.h"
#include "radeon_screen.h"
#include "radeon_fog.h"
/**********************************************************************/
/* Fog blend factor computation for hw tcl */
/* same calculation used as in t_vb_fog.c */
/**********************************************************************/
#define FOG_EXP_TABLE_SIZE 256
#define FOG_MAX (10.0)
#define EXP_FOG_MAX .0006595
#define FOG_INCR (FOG_MAX/FOG_EXP_TABLE_SIZE)
static GLfloat exp_table[FOG_EXP_TABLE_SIZE];
#if 1
#define NEG_EXP( result, narg ) \
do { \
GLfloat f = (GLfloat) (narg * (1.0/FOG_INCR)); \
GLint k = (GLint) f; \
if (k > FOG_EXP_TABLE_SIZE-2) \
result = (GLfloat) EXP_FOG_MAX; \
else \
result = exp_table[k] + (f-k)*(exp_table[k+1]-exp_table[k]); \
} while (0)
#else
#define NEG_EXP( result, narg ) \
do { \
result = exp(-narg); \
} while (0)
#endif
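/* Worked example: NEG_EXP(result, 1.0f) gives f = 1.0 / FOG_INCR = 25.6,
 * k = 25, and interpolates between exp_table[25] and exp_table[26],
 * yielding result ~= 0.368 ~= e^-1. */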
/**
* Initialize the exp_table[] lookup table for approximating exp().
*/
void
radeonInitStaticFogData( void )
{
GLfloat f = 0.0F;
GLint i = 0;
for ( ; i < FOG_EXP_TABLE_SIZE ; i++, f += FOG_INCR) {
exp_table[i] = (GLfloat) exp(-f);
}
}
/**
* Compute per-vertex fog blend factors from fog coordinates by
* evaluating the GL_LINEAR, GL_EXP or GL_EXP2 fog function.
* Fog coordinates are distances from the eye (typically between the
* near and far clip plane distances).
* Note the fog (eye Z) coords may be negative so we use ABS(z) below.
* Fog blend factors are in the range [0,1].
*/
float
radeonComputeFogBlendFactor( struct gl_context *ctx, GLfloat fogcoord )
{
GLfloat end = ctx->Fog.End;
GLfloat d, temp;
const GLfloat z = fabsf(fogcoord);
switch (ctx->Fog.Mode) {
case GL_LINEAR:
if (ctx->Fog.Start == ctx->Fog.End)
d = 1.0F;
else
d = 1.0F / (ctx->Fog.End - ctx->Fog.Start);
temp = (end - z) * d;
return CLAMP(temp, 0.0F, 1.0F);
case GL_EXP:
d = ctx->Fog.Density;
NEG_EXP( temp, d * z );
return temp;
case GL_EXP2:
d = ctx->Fog.Density*ctx->Fog.Density;
NEG_EXP( temp, d * z * z );
return temp;
default:
_mesa_problem(ctx, "Bad fog mode in make_fog_coord");
return 0;
}
}
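/* Worked example for GL_LINEAR: with Fog.Start = 10, Fog.End = 20 and
 * fogcoord z = 15, d = 1 / (20 - 10) = 0.1 and the blend factor is
 * (20 - 15) * 0.1 = 0.5. */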

View File

@ -1,44 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VMware, Inc.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keithw@vmware.com>
*/
#ifndef RADEON_FOG_H
#define RADEON_FOG_H
void
radeonInitStaticFogData( void );
float
radeonComputeFogBlendFactor( struct gl_context *ctx, GLfloat fogcoord );
#endif

View File

@ -1,417 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* Keith Whitwell <keithw@vmware.com>
*/
#include <sched.h>
#include <errno.h>
#include "main/attrib.h"
#include "main/bufferobj.h"
#include "swrast/swrast.h"
#include "main/glheader.h"
#include "util/simple_list.h"
#include "radeon_context.h"
#include "radeon_common.h"
#include "radeon_ioctl.h"
#define RADEON_TIMEOUT 512
#define RADEON_IDLE_RETRY 16
/* =============================================================
* Kernel command buffer handling
*/
/* The state atoms will be emitted in the order they appear in the atom list,
* so this step is important.
*/
void radeonSetUpAtomList( r100ContextPtr rmesa )
{
int i, mtu = rmesa->radeon.glCtx.Const.MaxTextureUnits;
make_empty_list(&rmesa->radeon.hw.atomlist);
rmesa->radeon.hw.atomlist.name = "atom-list";
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ctx);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.set);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lin);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msk);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.vpt);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tcl);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msc);
for (i = 0; i < mtu; ++i) {
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tex[i]);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.txr[i]);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.cube[i]);
}
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.zbs);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mtl);
for (i = 0; i < 3 + mtu; ++i)
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mat[i]);
for (i = 0; i < 8; ++i)
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i]);
for (i = 0; i < 6; ++i)
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i]);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.stp);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.eye);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.grd);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.fog);
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.glt);
}
static void radeonEmitScissor(r100ContextPtr rmesa)
{
BATCH_LOCALS(&rmesa->radeon);
if (rmesa->radeon.state.scissor.enabled) {
BEGIN_BATCH(6);
OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] | RADEON_SCISSOR_ENABLE);
OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
OUT_BATCH((rmesa->radeon.state.scissor.rect.y1 << 16) |
rmesa->radeon.state.scissor.rect.x1);
OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
OUT_BATCH(((rmesa->radeon.state.scissor.rect.y2) << 16) |
(rmesa->radeon.state.scissor.rect.x2));
END_BATCH();
} else {
BEGIN_BATCH(2);
OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0));
OUT_BATCH(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & ~RADEON_SCISSOR_ENABLE);
END_BATCH();
}
}
/* Fire a section of the retained (indexed_verts) buffer as a regular
* primitive.
*/
extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
GLuint vertex_format,
GLuint primitive,
GLuint vertex_nr )
{
BATCH_LOCALS(&rmesa->radeon);
assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));
radeonEmitState(&rmesa->radeon);
radeonEmitScissor(rmesa);
#if RADEON_OLD_PACKETS
BEGIN_BATCH(8);
OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 3);
OUT_BATCH(rmesa->ioctl.vertex_offset);
OUT_BATCH(vertex_nr);
OUT_BATCH(vertex_format);
OUT_BATCH(primitive | RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
(vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
rmesa->ioctl.bo,
RADEON_GEM_DOMAIN_GTT,
0, 0);
END_BATCH();
#else
BEGIN_BATCH(4);
OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_DRAW_VBUF, 1);
OUT_BATCH(vertex_format);
OUT_BATCH(primitive |
RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
RADEON_CP_VC_CNTL_MAOS_ENABLE |
RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
(vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));
END_BATCH();
#endif
}
void radeonFlushElts( struct gl_context *ctx )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
BATCH_LOCALS(&rmesa->radeon);
int nr;
uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_start);
int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __func__);
assert( rmesa->radeon.dma.flush == radeonFlushElts );
rmesa->radeon.dma.flush = NULL;
nr = rmesa->tcl.elt_used;
#if RADEON_OLD_PACKETS
dwords -= 2;
#endif
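/* Back-patch the packet emitted by radeonAllocEltsOpenEnded: its dword
 * count and NUM field were left open-ended, and are fixed up here now
 * that the final element count (nr) is known. */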
#if RADEON_OLD_PACKETS
cmd[1] |= (dwords + 3) << 16;
cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
cmd[1] |= (dwords + 2) << 16;
cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif
rmesa->radeon.cmdbuf.cs->cdw += dwords;
rmesa->radeon.cmdbuf.cs->section_cdw += dwords;
#if RADEON_OLD_PACKETS
radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
rmesa->ioctl.bo,
RADEON_GEM_DOMAIN_GTT,
0, 0);
#endif
END_BATCH();
if (RADEON_DEBUG & RADEON_SYNC) {
fprintf(stderr, "%s: Syncing\n", __func__);
radeonFinish( &rmesa->radeon.glCtx );
}
}
GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
GLuint vertex_format,
GLuint primitive,
GLuint min_nr )
{
GLushort *retval;
int align_min_nr;
BATCH_LOCALS(&rmesa->radeon);
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s %d prim %x\n", __func__, min_nr, primitive);
assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));
radeonEmitState(&rmesa->radeon);
radeonEmitScissor(rmesa);
rmesa->tcl.elt_cmd_start = rmesa->radeon.cmdbuf.cs->cdw;
/* round up min_nr to align the state */
align_min_nr = (min_nr + 1) & ~1;
#if RADEON_OLD_PACKETS
BEGIN_BATCH(2+ELTS_BUFSZ(align_min_nr)/4);
OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 0);
OUT_BATCH(rmesa->ioctl.vertex_offset);
OUT_BATCH(rmesa->ioctl.vertex_max);
OUT_BATCH(vertex_format);
OUT_BATCH(primitive |
RADEON_CP_VC_CNTL_PRIM_WALK_IND |
RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#else
BEGIN_BATCH(ELTS_BUFSZ(align_min_nr)/4);
OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_DRAW_INDX, 0);
OUT_BATCH(vertex_format);
OUT_BATCH(primitive |
RADEON_CP_VC_CNTL_PRIM_WALK_IND |
RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
RADEON_CP_VC_CNTL_MAOS_ENABLE |
RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);
#endif
rmesa->tcl.elt_cmd_offset = rmesa->radeon.cmdbuf.cs->cdw;
rmesa->tcl.elt_used = min_nr;
retval = (GLushort *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_offset);
if (RADEON_DEBUG & RADEON_RENDER)
fprintf(stderr, "%s: header prim %x \n",
__func__, primitive);
assert(!rmesa->radeon.dma.flush);
rmesa->radeon.glCtx.Driver.NeedFlush |= FLUSH_STORED_VERTICES;
rmesa->radeon.dma.flush = radeonFlushElts;
return retval;
}
void radeonEmitVertexAOS( r100ContextPtr rmesa,
GLuint vertex_size,
struct radeon_bo *bo,
GLuint offset )
{
#if RADEON_OLD_PACKETS
rmesa->ioctl.vertex_offset = offset;
rmesa->ioctl.bo = bo;
#else
BATCH_LOCALS(&rmesa->radeon);
if (RADEON_DEBUG & (RADEON_PRIMS|RADEON_IOCTL))
fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
__func__, vertex_size, offset);
BEGIN_BATCH(7);
OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, 2);
OUT_BATCH(1);
OUT_BATCH(vertex_size | (vertex_size << 8));
OUT_BATCH_RELOC(bo, offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
END_BATCH();
#endif
}
void radeonEmitAOS( r100ContextPtr rmesa,
GLuint nr,
GLuint offset )
{
#if RADEON_OLD_PACKETS
assert( nr == 1 );
rmesa->ioctl.bo = rmesa->radeon.tcl.aos[0].bo;
rmesa->ioctl.vertex_offset =
(rmesa->radeon.tcl.aos[0].offset + offset * rmesa->radeon.tcl.aos[0].stride * 4);
rmesa->ioctl.vertex_max = rmesa->radeon.tcl.aos[0].count;
#else
BATCH_LOCALS(&rmesa->radeon);
uint32_t voffset;
// int sz = AOS_BUFSZ(nr);
int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
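/* sz: 1 dword for the array count, 3 dwords per pair of arrays (one
 * packed format dword plus two offsets), 2 for a trailing odd array */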
int i;
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __func__);
BEGIN_BATCH(sz+2+(nr * 2));
OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz - 1);
OUT_BATCH(nr);
{
for (i = 0; i + 1 < nr; i += 2) {
OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) |
(rmesa->radeon.tcl.aos[i].stride << 8) |
(rmesa->radeon.tcl.aos[i + 1].components << 16) |
(rmesa->radeon.tcl.aos[i + 1].stride << 24));
voffset = rmesa->radeon.tcl.aos[i + 0].offset +
offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
OUT_BATCH(voffset);
voffset = rmesa->radeon.tcl.aos[i + 1].offset +
offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
OUT_BATCH(voffset);
}
if (nr & 1) {
OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) |
(rmesa->radeon.tcl.aos[nr - 1].stride << 8));
voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
OUT_BATCH(voffset);
}
for (i = 0; i + 1 < nr; i += 2) {
voffset = rmesa->radeon.tcl.aos[i + 0].offset +
offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride;
radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
rmesa->radeon.tcl.aos[i+0].bo,
RADEON_GEM_DOMAIN_GTT,
0, 0);
voffset = rmesa->radeon.tcl.aos[i + 1].offset +
offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride;
radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
rmesa->radeon.tcl.aos[i+1].bo,
RADEON_GEM_DOMAIN_GTT,
0, 0);
}
if (nr & 1) {
voffset = rmesa->radeon.tcl.aos[nr - 1].offset +
offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride;
radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
rmesa->radeon.tcl.aos[nr-1].bo,
RADEON_GEM_DOMAIN_GTT,
0, 0);
}
}
END_BATCH();
#endif
}
/* ================================================================
* Buffer clear
*/
#define RADEON_MAX_CLEARS 256
static void radeonClear( struct gl_context *ctx, GLbitfield mask )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
GLuint hwmask, swmask;
GLuint hwbits = BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_BACK_LEFT |
BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL |
BUFFER_BIT_COLOR0;
if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
rmesa->radeon.front_buffer_dirty = GL_TRUE;
}
if ( RADEON_DEBUG & RADEON_IOCTL ) {
fprintf( stderr, "radeonClear\n");
}
radeon_firevertices(&rmesa->radeon);
hwmask = mask & hwbits;
swmask = mask & ~hwbits;
if ( swmask ) {
if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "%s: swrast clear, mask: %x\n", __func__, swmask);
_swrast_Clear( ctx, swmask );
}
if ( !hwmask )
return;
radeonUserClear(ctx, hwmask);
}
void radeonInitIoctlFuncs( struct gl_context *ctx )
{
ctx->Driver.Clear = radeonClear;
ctx->Driver.Finish = radeonFinish;
ctx->Driver.Flush = radeonFlush;
}


@ -1,170 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
#ifndef __RADEON_IOCTL_H__
#define __RADEON_IOCTL_H__
#include "radeon_bo_gem.h"
#include "radeon_cs_gem.h"
extern void radeonEmitVertexAOS( r100ContextPtr rmesa,
GLuint vertex_size,
struct radeon_bo *bo,
GLuint offset );
extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
GLuint vertex_format,
GLuint primitive,
GLuint vertex_nr );
extern void radeonFlushElts( struct gl_context *ctx );
extern GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
GLuint vertex_format,
GLuint primitive,
GLuint min_nr );
extern void radeonEmitAOS( r100ContextPtr rmesa,
GLuint n,
GLuint offset );
extern void radeonEmitBlit( r100ContextPtr rmesa,
GLuint color_fmt,
GLuint src_pitch,
GLuint src_offset,
GLuint dst_pitch,
GLuint dst_offset,
GLint srcx, GLint srcy,
GLint dstx, GLint dsty,
GLuint w, GLuint h );
extern void radeonEmitWait( r100ContextPtr rmesa, GLuint flags );
extern void radeonFlushCmdBuf( r100ContextPtr rmesa, const char * );
extern void radeonFlush( struct gl_context *ctx, unsigned gallium_flush_flags );
extern void radeonFinish( struct gl_context *ctx );
extern void radeonInitIoctlFuncs( struct gl_context *ctx );
extern void radeonGetAllParams( r100ContextPtr rmesa );
extern void radeonSetUpAtomList( r100ContextPtr rmesa );
/* ================================================================
* Helper macros:
*/
/* Close off the last primitive, if it exists.
*/
#define RADEON_NEWPRIM( rmesa ) \
do { \
if ( rmesa->radeon.dma.flush ) \
rmesa->radeon.dma.flush( &rmesa->radeon.glCtx ); \
} while (0)
/* Can accommodate several state changes and primitive changes without
* actually firing the buffer.
*/
#define RADEON_STATECHANGE( rmesa, ATOM ) \
do { \
RADEON_NEWPRIM( rmesa ); \
rmesa->hw.ATOM.dirty = GL_TRUE; \
rmesa->radeon.hw.is_dirty = GL_TRUE; \
} while (0)
#define RADEON_DB_STATE( ATOM ) \
memcpy( rmesa->hw.ATOM.lastcmd, rmesa->hw.ATOM.cmd, \
rmesa->hw.ATOM.cmd_size * 4)
static inline int RADEON_DB_STATECHANGE(r100ContextPtr rmesa,
struct radeon_state_atom *atom )
{
if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) {
GLuint *tmp;
RADEON_NEWPRIM( rmesa );
atom->dirty = GL_TRUE;
rmesa->radeon.hw.is_dirty = GL_TRUE;
tmp = atom->cmd;
atom->cmd = atom->lastcmd;
atom->lastcmd = tmp;
return 1;
}
else
return 0;
}
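/* Illustrative note (not from the original source): RADEON_DB_STATE seeds
 * lastcmd from cmd so that new values can be built in lastcmd; when the
 * two buffers differ, the pointer swap above promotes lastcmd to cmd,
 * double-buffering the atom without a second memcpy.
 */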
/* Command lengths. Note that any time you ensure ELTS_BUFSZ or VBUF_BUFSZ
 * are available, you must also account for rmesa->state.max_state_size,
 * because r200EmitState is called from within r200EmitVbufPrim and
 * r200FlushElts.
 */
#if RADEON_OLD_PACKETS
#define AOS_BUFSZ(nr) ((3 + ((nr / 2) * 3) + ((nr & 1) * 2))+nr*2)
#define VERT_AOS_BUFSZ (0)
#define ELTS_BUFSZ(nr) (24 + nr * 2)
#define VBUF_BUFSZ (8)
#else
#define AOS_BUFSZ(nr) ((3 + ((nr / 2) * 3) + ((nr & 1) * 2) + nr*2))
#define VERT_AOS_BUFSZ (5)
#define ELTS_BUFSZ(nr) (16 + nr * 2)
#define VBUF_BUFSZ (4)
#endif
#define SCISSOR_BUFSZ (8)
#define INDEX_BUFSZ (7)
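/* Worked example (illustrative, not from the original source): on the new
 * packet path with nr = 3 vertex arrays,
 * AOS_BUFSZ(3) = 3 + (3 / 2) * 3 + (3 & 1) * 2 + 3 * 2 = 14 dwords.
 */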
static inline uint32_t cmdpacket3(int cmd_type)
{
drm_radeon_cmd_header_t cmd;
cmd.i = 0;
cmd.header.cmd_type = cmd_type;
return (uint32_t)cmd.i;
}
#define OUT_BATCH_PACKET3(packet, num_extra) do { \
OUT_BATCH(CP_PACKET2); \
OUT_BATCH(CP_PACKET3((packet), (num_extra))); \
} while(0)
#define OUT_BATCH_PACKET3_CLIP(packet, num_extra) do { \
OUT_BATCH(CP_PACKET2); \
OUT_BATCH(CP_PACKET3((packet), (num_extra))); \
} while(0)
#endif /* __RADEON_IOCTL_H__ */


@ -1,12 +0,0 @@
/* If using the new packets, we can choose either verts or arrays.
 * Otherwise, we must use verts.
 */
#include "radeon_context.h"
#define RADEON_MAOS_VERTS 0
#if (RADEON_MAOS_VERTS) || (RADEON_OLD_PACKETS)
#include "radeon_maos_verts.c"
#else
#include "radeon_maos_arrays.c"
#endif


@ -1,42 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VMware, Inc.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keithw@vmware.com>
*/
#ifndef __RADEON_MAOS_H__
#define __RADEON_MAOS_H__
#include "radeon_context.h"
extern void radeonEmitArrays( struct gl_context *ctx, GLuint inputs );
#endif


@ -1,289 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VMware, Inc.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keithw@vmware.com>
*/
#include "main/glheader.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "swrast_setup/swrast_setup.h"
#include "math/m_translate.h"
#include "tnl/tnl.h"
#include "radeon_context.h"
#include "radeon_ioctl.h"
#include "radeon_state.h"
#include "radeon_swtcl.h"
#include "radeon_maos.h"
#include "radeon_tcl.h"
static void emit_s0_vec(uint32_t *out, GLvoid *data, int stride, int count)
{
int i;
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d\n",
__func__, count, stride);
for (i = 0; i < count; i++) {
out[0] = *(int *)data;
out[1] = 0;
out += 2;
data += stride;
}
}
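/* Illustrative note (not from the original source): emit_stq_vec below
 * copies the s, t and q texture coordinates at byte offsets 0, 4 and 12,
 * deliberately skipping the r coordinate at offset 8.
 */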
static void emit_stq_vec(uint32_t *out, GLvoid *data, int stride, int count)
{
int i;
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d\n",
__func__, count, stride);
for (i = 0; i < count; i++) {
out[0] = *(int *)data;
out[1] = *(int *)(data+4);
out[2] = *(int *)(data+12);
out += 3;
data += stride;
}
}
static void emit_tex_vector(struct gl_context *ctx, struct radeon_aos *aos,
GLvoid *data, int size, int stride, int count)
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
int emitsize;
uint32_t *out;
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s %d/%d\n", __func__, count, size);
switch (size) {
case 4: emitsize = 3; break;
case 3: emitsize = 3; break;
default: emitsize = 2; break;
}
if (stride == 0) {
radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, emitsize * 4, 32);
count = 1;
aos->stride = 0;
}
else {
radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, emitsize * count * 4, 32);
aos->stride = emitsize;
}
aos->components = emitsize;
aos->count = count;
/* Emit the data
*/
radeon_bo_map(aos->bo, 1);
out = (uint32_t*)((char*)aos->bo->ptr + aos->offset);
switch (size) {
case 1:
emit_s0_vec( out, data, stride, count );
break;
case 2:
radeonEmitVec8( out, data, stride, count );
break;
case 3:
radeonEmitVec12( out, data, stride, count );
break;
case 4:
emit_stq_vec( out, data, stride, count );
break;
default:
assert(0);
exit(1);
break;
}
radeon_bo_unmap(aos->bo);
}
/* Emit any changed arrays to new GART memory, re-emit a packet to
* update the arrays.
*/
void radeonEmitArrays( struct gl_context *ctx, GLuint inputs )
{
r100ContextPtr rmesa = R100_CONTEXT( ctx );
struct vertex_buffer *VB = &TNL_CONTEXT( ctx )->vb;
GLuint nr = 0;
GLuint vfmt = 0;
GLuint count = VB->Count;
GLuint vtx, unit;
#if 0
if (RADEON_DEBUG & RADEON_VERTS)
_tnl_print_vert_flags( __func__, inputs );
#endif
if (1) {
if (!rmesa->tcl.obj.buf)
rcommon_emit_vector( ctx,
&(rmesa->tcl.aos[nr]),
(char *)VB->AttribPtr[_TNL_ATTRIB_POS]->data,
VB->AttribPtr[_TNL_ATTRIB_POS]->size,
VB->AttribPtr[_TNL_ATTRIB_POS]->stride,
count);
switch( VB->AttribPtr[_TNL_ATTRIB_POS]->size ) {
case 4: vfmt |= RADEON_CP_VC_FRMT_W0; FALLTHROUGH;
case 3: vfmt |= RADEON_CP_VC_FRMT_Z; FALLTHROUGH;
case 2: vfmt |= RADEON_CP_VC_FRMT_XY; FALLTHROUGH;
default:
break;
}
nr++;
}
if (inputs & VERT_BIT_NORMAL) {
if (!rmesa->tcl.norm.buf)
rcommon_emit_vector( ctx,
&(rmesa->tcl.aos[nr]),
(char *)VB->AttribPtr[_TNL_ATTRIB_NORMAL]->data,
3,
VB->AttribPtr[_TNL_ATTRIB_NORMAL]->stride,
count);
vfmt |= RADEON_CP_VC_FRMT_N0;
nr++;
}
if (inputs & VERT_BIT_COLOR0) {
int emitsize;
if (VB->AttribPtr[_TNL_ATTRIB_COLOR0]->size == 4 &&
(VB->AttribPtr[_TNL_ATTRIB_COLOR0]->stride != 0 ||
VB->AttribPtr[_TNL_ATTRIB_COLOR0]->data[0][3] != 1.0)) {
vfmt |= RADEON_CP_VC_FRMT_FPCOLOR | RADEON_CP_VC_FRMT_FPALPHA;
emitsize = 4;
}
else {
vfmt |= RADEON_CP_VC_FRMT_FPCOLOR;
emitsize = 3;
}
if (!rmesa->tcl.rgba.buf)
rcommon_emit_vector( ctx,
&(rmesa->tcl.aos[nr]),
(char *)VB->AttribPtr[_TNL_ATTRIB_COLOR0]->data,
emitsize,
VB->AttribPtr[_TNL_ATTRIB_COLOR0]->stride,
count);
nr++;
}
if (inputs & VERT_BIT_COLOR1) {
if (!rmesa->tcl.spec.buf) {
rcommon_emit_vector( ctx,
&(rmesa->tcl.aos[nr]),
(char *)VB->AttribPtr[_TNL_ATTRIB_COLOR1]->data,
3,
VB->AttribPtr[_TNL_ATTRIB_COLOR1]->stride,
count);
}
vfmt |= RADEON_CP_VC_FRMT_FPSPEC;
nr++;
}
/* FIXME: not sure if this is correct. May need to stitch this together with
secondary color. It seems odd that for primary color, the color and alpha
values are emitted together, but for secondary color they are not. */
if (inputs & VERT_BIT_FOG) {
if (!rmesa->tcl.fog.buf)
rcommon_emit_vecfog( ctx,
&(rmesa->tcl.aos[nr]),
(char *)VB->AttribPtr[_TNL_ATTRIB_FOG]->data,
VB->AttribPtr[_TNL_ATTRIB_FOG]->stride,
count);
vfmt |= RADEON_CP_VC_FRMT_FPFOG;
nr++;
}
vtx = (rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] &
~(RADEON_TCL_VTX_Q0|RADEON_TCL_VTX_Q1|RADEON_TCL_VTX_Q2));
for (unit = 0; unit < ctx->Const.MaxTextureUnits; unit++) {
if (inputs & VERT_BIT_TEX(unit)) {
if (!rmesa->tcl.tex[unit].buf)
emit_tex_vector( ctx,
&(rmesa->tcl.aos[nr]),
(char *)VB->AttribPtr[_TNL_ATTRIB_TEX0 + unit]->data,
VB->AttribPtr[_TNL_ATTRIB_TEX0 + unit]->size,
VB->AttribPtr[_TNL_ATTRIB_TEX0 + unit]->stride,
count );
nr++;
vfmt |= RADEON_ST_BIT(unit);
/* Assume we need the 3rd coord if texgen is active for r/q, OR if at
least 3 coords are submitted. This may not be 100% correct. */
if (VB->AttribPtr[_TNL_ATTRIB_TEX0 + unit]->size >= 3) {
vtx |= RADEON_Q_BIT(unit);
vfmt |= RADEON_Q_BIT(unit);
}
if ( (ctx->Texture.Unit[unit].TexGenEnabled & (R_BIT | Q_BIT)) )
vtx |= RADEON_Q_BIT(unit);
else if ((VB->AttribPtr[_TNL_ATTRIB_TEX0 + unit]->size >= 3) &&
(!ctx->Texture.Unit[unit]._Current ||
ctx->Texture.Unit[unit]._Current->Target != GL_TEXTURE_CUBE_MAP)) {
GLuint swaptexmatcol = (VB->AttribPtr[_TNL_ATTRIB_TEX0 + unit]->size - 3);
if (((rmesa->NeedTexMatrix >> unit) & 1) &&
(swaptexmatcol != ((rmesa->TexMatColSwap >> unit) & 1)))
radeonUploadTexMatrix( rmesa, unit, swaptexmatcol ) ;
}
}
}
if (vtx != rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT]) {
RADEON_STATECHANGE( rmesa, tcl );
rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] = vtx;
}
rmesa->tcl.nr_aos_components = nr;
rmesa->tcl.vertex_format = vfmt;
}


@ -1,300 +0,0 @@
/*
* Mesa 3-D graphics library
*
* Copyright (C) 1999-2002 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Keith Whitwell <keithw@vmware.com>
*/
#ifndef LOCALVARS
#define LOCALVARS
#endif
#undef TCL_DEBUG
#ifndef TCL_DEBUG
#define TCL_DEBUG 0
#endif
static void TAG(emit)( struct gl_context *ctx,
GLuint start, GLuint end,
void *dest )
{
LOCALVARS
struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
GLuint (*tc0)[4], (*tc1)[4], (*tc2)[4];
GLfloat (*col)[4], (*spec)[4];
GLfloat (*fog)[4];
GLuint (*norm)[4];
GLuint tc0_stride, tc1_stride, col_stride, spec_stride, fog_stride;
GLuint tc2_stride, norm_stride;
GLuint fill_tex = 0;
GLuint rqcoordsnoswap = 0;
GLuint (*coord)[4];
GLuint coord_stride; /* object coordinates */
int i;
union emit_union *v = (union emit_union *)dest;
radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s\n", __func__);
coord = (GLuint (*)[4])VB->AttribPtr[_TNL_ATTRIB_POS]->data;
coord_stride = VB->AttribPtr[_TNL_ATTRIB_POS]->stride;
if (DO_TEX2) {
if (VB->AttribPtr[_TNL_ATTRIB_TEX2]) {
const GLuint t2 = GET_TEXSOURCE(2);
tc2 = (GLuint (*)[4])VB->AttribPtr[_TNL_ATTRIB_TEX0 + t2]->data;
tc2_stride = VB->AttribPtr[_TNL_ATTRIB_TEX0 + t2]->stride;
if (DO_PTEX && VB->AttribPtr[_TNL_ATTRIB_TEX0 + t2]->size < 3) {
fill_tex |= (1<<2);
}
else if (DO_PTEX && VB->AttribPtr[_TNL_ATTRIB_TEX0 + t2]->size < 4) {
rqcoordsnoswap |= (1<<2);
}
} else {
tc2 = (GLuint (*)[4])&ctx->Current.Attrib[VERT_ATTRIB_TEX2];
tc2_stride = 0;
}
}
if (DO_TEX1) {
if (VB->AttribPtr[_TNL_ATTRIB_TEX1]) {
const GLuint t1 = GET_TEXSOURCE(1);
tc1 = (GLuint (*)[4])VB->AttribPtr[_TNL_ATTRIB_TEX0 + t1]->data;
tc1_stride = VB->AttribPtr[_TNL_ATTRIB_TEX0 + t1]->stride;
if (DO_PTEX && VB->AttribPtr[_TNL_ATTRIB_TEX0 + t1]->size < 3) {
fill_tex |= (1<<1);
}
else if (DO_PTEX && VB->AttribPtr[_TNL_ATTRIB_TEX0 + t1]->size < 4) {
rqcoordsnoswap |= (1<<1);
}
} else {
tc1 = (GLuint (*)[4])&ctx->Current.Attrib[VERT_ATTRIB_TEX1];
tc1_stride = 0;
}
}
if (DO_TEX0) {
if (VB->AttribPtr[_TNL_ATTRIB_TEX0]) {
const GLuint t0 = GET_TEXSOURCE(0);
tc0_stride = VB->AttribPtr[_TNL_ATTRIB_TEX0 + t0]->stride;
tc0 = (GLuint (*)[4])VB->AttribPtr[_TNL_ATTRIB_TEX0 + t0]->data;
if (DO_PTEX && VB->AttribPtr[_TNL_ATTRIB_TEX0 + t0]->size < 3) {
fill_tex |= (1<<0);
}
else if (DO_PTEX && VB->AttribPtr[_TNL_ATTRIB_TEX0 + t0]->size < 4) {
rqcoordsnoswap |= (1<<0);
}
} else {
tc0 = (GLuint (*)[4])&ctx->Current.Attrib[VERT_ATTRIB_TEX0];
tc0_stride = 0;
}
}
if (DO_NORM) {
if (VB->AttribPtr[_TNL_ATTRIB_NORMAL]) {
norm_stride = VB->AttribPtr[_TNL_ATTRIB_NORMAL]->stride;
norm = (GLuint (*)[4])VB->AttribPtr[_TNL_ATTRIB_NORMAL]->data;
} else {
norm_stride = 0;
norm = (GLuint (*)[4])&ctx->Current.Attrib[VERT_ATTRIB_NORMAL];
}
}
if (DO_RGBA) {
if (VB->AttribPtr[_TNL_ATTRIB_COLOR0]) {
col = VB->AttribPtr[_TNL_ATTRIB_COLOR0]->data;
col_stride = VB->AttribPtr[_TNL_ATTRIB_COLOR0]->stride;
} else {
col = (GLfloat (*)[4])ctx->Current.Attrib[VERT_ATTRIB_COLOR0];
col_stride = 0;
}
}
if (DO_SPEC_OR_FOG) {
if (VB->AttribPtr[_TNL_ATTRIB_COLOR1]) {
spec = VB->AttribPtr[_TNL_ATTRIB_COLOR1]->data;
spec_stride = VB->AttribPtr[_TNL_ATTRIB_COLOR1]->stride;
} else {
spec = (GLfloat (*)[4])ctx->Current.Attrib[VERT_ATTRIB_COLOR1];
spec_stride = 0;
}
}
if (DO_SPEC_OR_FOG) {
if (VB->AttribPtr[_TNL_ATTRIB_FOG]) {
fog = VB->AttribPtr[_TNL_ATTRIB_FOG]->data;
fog_stride = VB->AttribPtr[_TNL_ATTRIB_FOG]->stride;
} else {
fog = (GLfloat (*)[4])ctx->Current.Attrib[VERT_ATTRIB_FOG];
fog_stride = 0;
}
}
if (start) {
coord = (GLuint (*)[4])((GLubyte *)coord + start * coord_stride);
if (DO_TEX0)
tc0 = (GLuint (*)[4])((GLubyte *)tc0 + start * tc0_stride);
if (DO_TEX1)
tc1 = (GLuint (*)[4])((GLubyte *)tc1 + start * tc1_stride);
if (DO_TEX2)
tc2 = (GLuint (*)[4])((GLubyte *)tc2 + start * tc2_stride);
if (DO_NORM)
norm = (GLuint (*)[4])((GLubyte *)norm + start * norm_stride);
if (DO_RGBA)
STRIDE_4F(col, start * col_stride);
if (DO_SPEC)
STRIDE_4F(spec, start * spec_stride);
if (DO_FOG)
STRIDE_4F(fog, start * fog_stride);
}
{
for (i=start; i < end; i++) {
v[0].ui = coord[0][0];
v[1].ui = coord[0][1];
v[2].ui = coord[0][2];
if (DO_W) {
v[3].ui = coord[0][3];
v += 4;
}
else
v += 3;
coord = (GLuint (*)[4])((GLubyte *)coord + coord_stride);
if (DO_NORM) {
v[0].ui = norm[0][0];
v[1].ui = norm[0][1];
v[2].ui = norm[0][2];
v += 3;
norm = (GLuint (*)[4])((GLubyte *)norm + norm_stride);
}
if (DO_RGBA) {
UNCLAMPED_FLOAT_TO_UBYTE(v[0].rgba.red, col[0][0]);
UNCLAMPED_FLOAT_TO_UBYTE(v[0].rgba.green, col[0][1]);
UNCLAMPED_FLOAT_TO_UBYTE(v[0].rgba.blue, col[0][2]);
UNCLAMPED_FLOAT_TO_UBYTE(v[0].rgba.alpha, col[0][3]);
STRIDE_4F(col, col_stride);
v++;
}
if (DO_SPEC_OR_FOG) {
if (DO_SPEC) {
UNCLAMPED_FLOAT_TO_UBYTE(v[0].rgba.red, spec[0][0]);
UNCLAMPED_FLOAT_TO_UBYTE(v[0].rgba.green, spec[0][1]);
UNCLAMPED_FLOAT_TO_UBYTE(v[0].rgba.blue, spec[0][2]);
STRIDE_4F(spec, spec_stride);
}
if (DO_FOG) {
UNCLAMPED_FLOAT_TO_UBYTE(v[0].rgba.alpha, radeonComputeFogBlendFactor(ctx, fog[0][0]));
STRIDE_4F(fog, fog_stride);
}
if (TCL_DEBUG) fprintf(stderr, "%x ", v[0].ui);
v++;
}
if (DO_TEX0) {
v[0].ui = tc0[0][0];
v[1].ui = tc0[0][1];
if (TCL_DEBUG) fprintf(stderr, "t0: %.2f %.2f ", v[0].f, v[1].f);
if (DO_PTEX) {
if (fill_tex & (1<<0))
v[2].f = 1.0;
else if (rqcoordsnoswap & (1<<0))
v[2].ui = tc0[0][2];
else
v[2].ui = tc0[0][3];
if (TCL_DEBUG) fprintf(stderr, "%.2f ", v[2].f);
v += 3;
}
else
v += 2;
tc0 = (GLuint (*)[4])((GLubyte *)tc0 + tc0_stride);
}
if (DO_TEX1) {
v[0].ui = tc1[0][0];
v[1].ui = tc1[0][1];
if (TCL_DEBUG) fprintf(stderr, "t1: %.2f %.2f ", v[0].f, v[1].f);
if (DO_PTEX) {
if (fill_tex & (1<<1))
v[2].f = 1.0;
else if (rqcoordsnoswap & (1<<1))
v[2].ui = tc1[0][2];
else
v[2].ui = tc1[0][3];
if (TCL_DEBUG) fprintf(stderr, "%.2f ", v[2].f);
v += 3;
}
else
v += 2;
tc1 = (GLuint (*)[4])((GLubyte *)tc1 + tc1_stride);
}
if (DO_TEX2) {
v[0].ui = tc2[0][0];
v[1].ui = tc2[0][1];
if (TCL_DEBUG) fprintf(stderr, "t2: %.2f %.2f ", v[0].f, v[1].f);
if (DO_PTEX) {
if (fill_tex & (1<<2))
v[2].f = 1.0;
else if (rqcoordsnoswap & (1<<2))
v[2].ui = tc2[0][2];
else
v[2].ui = tc2[0][3];
if (TCL_DEBUG) fprintf(stderr, "%.2f ", v[2].f);
v += 3;
}
else
v += 2;
tc2 = (GLuint (*)[4])((GLubyte *)tc2 + tc2_stride);
}
if (TCL_DEBUG) fprintf(stderr, "\n");
}
}
}
static void TAG(init)( void )
{
int sz = 3;
if (DO_W) sz++;
if (DO_NORM) sz += 3;
if (DO_RGBA) sz++;
if (DO_SPEC_OR_FOG) sz++;
if (DO_TEX0) sz += 2;
if (DO_TEX0 && DO_PTEX) sz++;
if (DO_TEX1) sz += 2;
if (DO_TEX1 && DO_PTEX) sz++;
if (DO_TEX2) sz += 2;
if (DO_TEX2 && DO_PTEX) sz++;
setup_tab[IDX].emit = TAG(emit);
setup_tab[IDX].vertex_format = IND;
setup_tab[IDX].vertex_size = sz;
}
#undef IND
#undef TAG
#undef IDX


@ -1,436 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VMware, Inc.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keithw@vmware.com>
*/
#include "main/glheader.h"
#include "main/mtypes.h"
#include "main/state.h"
#include "vbo/vbo.h"
#include "math/m_translate.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tex.h"
#include "radeon_tcl.h"
#include "radeon_swtcl.h"
#include "radeon_maos.h"
#include "radeon_fog.h"
#define RADEON_TCL_MAX_SETUP 19
union emit_union { float f; GLuint ui; radeon_color_t rgba; };
static struct {
void (*emit)( struct gl_context *, GLuint, GLuint, void * );
GLuint vertex_size;
GLuint vertex_format;
} setup_tab[RADEON_TCL_MAX_SETUP];
#define DO_W (IND & RADEON_CP_VC_FRMT_W0)
#define DO_RGBA (IND & RADEON_CP_VC_FRMT_PKCOLOR)
#define DO_SPEC_OR_FOG (IND & RADEON_CP_VC_FRMT_PKSPEC)
#define DO_SPEC ((IND & RADEON_CP_VC_FRMT_PKSPEC) && \
_mesa_need_secondary_color(ctx))
#define DO_FOG ((IND & RADEON_CP_VC_FRMT_PKSPEC) && ctx->Fog.Enabled && \
(ctx->Fog.FogCoordinateSource == GL_FOG_COORD))
#define DO_TEX0 ((IND & RADEON_CP_VC_FRMT_ST0) != 0)
#define DO_TEX1 ((IND & RADEON_CP_VC_FRMT_ST1) != 0)
#define DO_TEX2 ((IND & RADEON_CP_VC_FRMT_ST2) != 0)
#define DO_PTEX ((IND & RADEON_CP_VC_FRMT_Q0) != 0)
#define DO_NORM ((IND & RADEON_CP_VC_FRMT_N0) != 0)
#define DO_TEX3 0
#define GET_TEXSOURCE(n) n
/***********************************************************************
* Generate vertex emit functions *
***********************************************************************/
/* Defined in order of increasing vertex size:
*/
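/* Illustrative note (not from the original source): radeon_maos_vbtmp.h acts
 * as a preprocessor template. Each inclusion below instantiates TAG(emit)
 * and TAG(init) for the vertex layout described by IND, and TAG(init)
 * registers the resulting emitter in setup_tab[IDX].
 */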
#define IDX 0
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR)
#define TAG(x) x##_rgba
#include "radeon_maos_vbtmp.h"
#define IDX 1
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_N0)
#define TAG(x) x##_n
#include "radeon_maos_vbtmp.h"
#define IDX 2
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_ST0)
#define TAG(x) x##_rgba_st
#include "radeon_maos_vbtmp.h"
#define IDX 3
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_N0)
#define TAG(x) x##_rgba_n
#include "radeon_maos_vbtmp.h"
#define IDX 4
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_N0)
#define TAG(x) x##_st_n
#include "radeon_maos_vbtmp.h"
#define IDX 5
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_ST1)
#define TAG(x) x##_rgba_st_st
#include "radeon_maos_vbtmp.h"
#define IDX 6
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_N0)
#define TAG(x) x##_rgba_st_n
#include "radeon_maos_vbtmp.h"
#define IDX 7
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_PKSPEC| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_ST1)
#define TAG(x) x##_rgba_spec_st_st
#include "radeon_maos_vbtmp.h"
#define IDX 8
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_ST1| \
RADEON_CP_VC_FRMT_N0)
#define TAG(x) x##_st_st_n
#include "radeon_maos_vbtmp.h"
#define IDX 9
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_PKSPEC| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_ST1| \
RADEON_CP_VC_FRMT_N0)
#define TAG(x) x##_rgba_spec_st_st_n
#include "radeon_maos_vbtmp.h"
#define IDX 10
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_Q0)
#define TAG(x) x##_rgba_stq
#include "radeon_maos_vbtmp.h"
#define IDX 11
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_ST1| \
RADEON_CP_VC_FRMT_Q1| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_Q0)
#define TAG(x) x##_rgba_stq_stq
#include "radeon_maos_vbtmp.h"
#define IDX 12
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_W0| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_PKSPEC| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_Q0| \
RADEON_CP_VC_FRMT_ST1| \
RADEON_CP_VC_FRMT_Q1| \
RADEON_CP_VC_FRMT_N0)
#define TAG(x) x##_w_rgba_spec_stq_stq_n
#include "radeon_maos_vbtmp.h"
#define IDX 13
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_ST1| \
RADEON_CP_VC_FRMT_ST2)
#define TAG(x) x##_rgba_st_st_st
#include "radeon_maos_vbtmp.h"
#define IDX 14
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_PKSPEC| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_ST1| \
RADEON_CP_VC_FRMT_ST2)
#define TAG(x) x##_rgba_spec_st_st_st
#include "radeon_maos_vbtmp.h"
#define IDX 15
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_ST1| \
RADEON_CP_VC_FRMT_ST2| \
RADEON_CP_VC_FRMT_N0)
#define TAG(x) x##_st_st_st_n
#include "radeon_maos_vbtmp.h"
#define IDX 16
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_PKSPEC| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_ST1| \
RADEON_CP_VC_FRMT_ST2| \
RADEON_CP_VC_FRMT_N0)
#define TAG(x) x##_rgba_spec_st_st_st_n
#include "radeon_maos_vbtmp.h"
#define IDX 17
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_Q0| \
RADEON_CP_VC_FRMT_ST1| \
RADEON_CP_VC_FRMT_Q1| \
RADEON_CP_VC_FRMT_ST2| \
RADEON_CP_VC_FRMT_Q2)
#define TAG(x) x##_rgba_stq_stq_stq
#include "radeon_maos_vbtmp.h"
#define IDX 18
#define IND (RADEON_CP_VC_FRMT_XY| \
RADEON_CP_VC_FRMT_Z| \
RADEON_CP_VC_FRMT_W0| \
RADEON_CP_VC_FRMT_PKCOLOR| \
RADEON_CP_VC_FRMT_PKSPEC| \
RADEON_CP_VC_FRMT_ST0| \
RADEON_CP_VC_FRMT_Q0| \
RADEON_CP_VC_FRMT_ST1| \
RADEON_CP_VC_FRMT_Q1| \
RADEON_CP_VC_FRMT_ST2| \
RADEON_CP_VC_FRMT_Q2| \
RADEON_CP_VC_FRMT_N0)
#define TAG(x) x##_w_rgba_spec_stq_stq_stq_n
#include "radeon_maos_vbtmp.h"
/***********************************************************************
* Initialization
***********************************************************************/
static void init_tcl_verts( void )
{
init_rgba();
init_n();
init_rgba_n();
init_rgba_st();
init_st_n();
init_rgba_st_st();
init_rgba_st_n();
init_rgba_spec_st_st();
init_st_st_n();
init_rgba_spec_st_st_n();
init_rgba_stq();
init_rgba_stq_stq();
init_w_rgba_spec_stq_stq_n();
init_rgba_st_st_st();
init_rgba_spec_st_st_st();
init_st_st_st_n();
init_rgba_spec_st_st_st_n();
init_rgba_stq_stq_stq();
init_w_rgba_spec_stq_stq_stq_n();
}
void radeonEmitArrays( struct gl_context *ctx, GLuint inputs )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
GLuint req = 0;
GLuint unit;
GLuint vtx = (rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] &
~(RADEON_TCL_VTX_Q0|RADEON_TCL_VTX_Q1|RADEON_TCL_VTX_Q2));
int i;
static int firsttime = 1;
if (firsttime) {
init_tcl_verts();
firsttime = 0;
}
if (1) {
req |= RADEON_CP_VC_FRMT_Z;
if (VB->AttribPtr[_TNL_ATTRIB_POS]->size == 4) {
req |= RADEON_CP_VC_FRMT_W0;
}
}
if (inputs & VERT_BIT_NORMAL) {
req |= RADEON_CP_VC_FRMT_N0;
}
if (inputs & VERT_BIT_COLOR0) {
req |= RADEON_CP_VC_FRMT_PKCOLOR;
}
if (inputs & (VERT_BIT_COLOR1|VERT_BIT_FOG)) {
req |= RADEON_CP_VC_FRMT_PKSPEC;
}
for (unit = 0; unit < ctx->Const.MaxTextureUnits; unit++) {
if (inputs & VERT_BIT_TEX(unit)) {
req |= RADEON_ST_BIT(unit);
/* Assume we need the 3rd coord if texgen is active for r/q, OR if at
least 3 coords are submitted. This may not be 100% correct. */
if (VB->AttribPtr[_TNL_ATTRIB_TEX0 + unit]->size >= 3) {
req |= RADEON_Q_BIT(unit);
vtx |= RADEON_Q_BIT(unit);
}
if ( (ctx->Texture.FixedFuncUnit[unit].TexGenEnabled & (R_BIT | Q_BIT)) )
vtx |= RADEON_Q_BIT(unit);
else if ((VB->AttribPtr[_TNL_ATTRIB_TEX0 + unit]->size >= 3) &&
(!ctx->Texture.Unit[unit]._Current ||
ctx->Texture.Unit[unit]._Current->Target != GL_TEXTURE_CUBE_MAP)) {
GLuint swaptexmatcol = (VB->AttribPtr[_TNL_ATTRIB_TEX0 + unit]->size - 3);
if (((rmesa->NeedTexMatrix >> unit) & 1) &&
(swaptexmatcol != ((rmesa->TexMatColSwap >> unit) & 1)))
radeonUploadTexMatrix( rmesa, unit, swaptexmatcol ) ;
}
}
}
if (vtx != rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT]) {
RADEON_STATECHANGE( rmesa, tcl );
rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] = vtx;
}
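/* Pick the first setup_tab entry whose vertex format is a superset of the
 * required components; because the table is defined in order of increasing
 * vertex size, this selects the smallest matching layout. (Explanatory
 * comment, not from the original source.)
 */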
for (i = 0 ; i < RADEON_TCL_MAX_SETUP ; i++)
if ((setup_tab[i].vertex_format & req) == req)
break;
if (rmesa->tcl.vertex_format == setup_tab[i].vertex_format &&
rmesa->radeon.tcl.aos[0].bo)
return;
if (rmesa->radeon.tcl.aos[0].bo)
radeonReleaseArrays( ctx, ~0 );
radeonAllocDmaRegion( &rmesa->radeon,
&rmesa->radeon.tcl.aos[0].bo,
&rmesa->radeon.tcl.aos[0].offset,
VB->Count * setup_tab[i].vertex_size * 4,
4);
/* The vertex code expects Obj to be clean to element 3. To fix
* this, add more vertex code (for obj-2, obj-3) or preferably move
* to maos.
*/
if (VB->AttribPtr[_TNL_ATTRIB_POS]->size < 3 ||
(VB->AttribPtr[_TNL_ATTRIB_POS]->size == 3 &&
(setup_tab[i].vertex_format & RADEON_CP_VC_FRMT_W0))) {
_math_trans_4f( rmesa->tcl.ObjClean.data,
VB->AttribPtr[_TNL_ATTRIB_POS]->data,
VB->AttribPtr[_TNL_ATTRIB_POS]->stride,
GL_FLOAT,
VB->AttribPtr[_TNL_ATTRIB_POS]->size,
0,
VB->Count );
switch (VB->AttribPtr[_TNL_ATTRIB_POS]->size) {
case 1:
_mesa_vector4f_clean_elem(&rmesa->tcl.ObjClean, VB->Count, 1);
FALLTHROUGH;
case 2:
_mesa_vector4f_clean_elem(&rmesa->tcl.ObjClean, VB->Count, 2);
FALLTHROUGH;
case 3:
if (setup_tab[i].vertex_format & RADEON_CP_VC_FRMT_W0) {
_mesa_vector4f_clean_elem(&rmesa->tcl.ObjClean, VB->Count, 3);
}
FALLTHROUGH;
case 4:
default:
break;
}
VB->AttribPtr[_TNL_ATTRIB_POS] = &rmesa->tcl.ObjClean;
}
radeon_bo_map(rmesa->radeon.tcl.aos[0].bo, 1);
setup_tab[i].emit( ctx, 0, VB->Count,
rmesa->radeon.tcl.aos[0].bo->ptr + rmesa->radeon.tcl.aos[0].offset);
radeon_bo_unmap(rmesa->radeon.tcl.aos[0].bo);
// rmesa->radeon.tcl.aos[0].size = setup_tab[i].vertex_size;
rmesa->radeon.tcl.aos[0].stride = setup_tab[i].vertex_size;
rmesa->tcl.vertex_format = setup_tab[i].vertex_format;
rmesa->radeon.tcl.aos_count = 1;
}


@ -1,580 +0,0 @@
/*
* Copyright (C) 2009 Maciej Cencora.
* Copyright (C) 2008 Nicolai Haehnle.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "radeon_mipmap_tree.h"
#include <errno.h>
#include <unistd.h>
#include "main/teximage.h"
#include "main/texobj.h"
#include "main/enums.h"
#include "util/u_memory.h"
#include "radeon_texture.h"
#include "radeon_tile.h"
static unsigned get_aligned_compressed_row_stride(
mesa_format format,
unsigned width,
unsigned minStride)
{
const unsigned blockBytes = _mesa_get_format_bytes(format);
unsigned blockWidth, blockHeight;
unsigned stride;
_mesa_get_format_block_size(format, &blockWidth, &blockHeight);
/* Count the number of blocks required to store the given width,
 * then multiply by the number of bytes required to store a block.
 */
stride = (width + blockWidth - 1) / blockWidth * blockBytes;
/* Round the given minimum stride to the next full blocksize.
* (minStride + blockBytes - 1) / blockBytes * blockBytes
*/
if ( stride < minStride )
stride = (minStride + blockBytes - 1) / blockBytes * blockBytes;
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s width %u, minStride %u, block(bytes %u, width %u):"
"stride %u\n",
__func__, width, minStride,
blockBytes, blockWidth,
stride);
return stride;
}
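/* Worked example (illustrative, not from the original source): a DXT1-style
 * format uses 8-byte blocks that are 4 texels wide, so width = 13 needs
 * (13 + 4 - 1) / 4 = 4 blocks and the stride becomes 4 * 8 = 32 bytes,
 * unless minStride rounds it up further.
 */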
unsigned get_texture_image_size(
mesa_format format,
unsigned rowStride,
unsigned height,
unsigned depth,
unsigned tiling)
{
if (_mesa_is_format_compressed(format)) {
unsigned blockWidth, blockHeight;
_mesa_get_format_block_size(format, &blockWidth, &blockHeight);
return rowStride * ((height + blockHeight - 1) / blockHeight) * depth;
} else if (tiling) {
/* Need to align height to tile height */
unsigned tileWidth, tileHeight;
get_tile_size(format, &tileWidth, &tileHeight);
tileHeight--;
height = (height + tileHeight) & ~tileHeight;
}
return rowStride * height * depth;
}
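/* Worked example (illustrative, not from the original source): with a tile
 * height of 8, height = 13 becomes (13 + 7) & ~7 = 16, i.e. it is rounded
 * up to the next multiple of the tile height before computing the size.
 */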
unsigned get_texture_image_row_stride(radeonContextPtr rmesa, mesa_format format, unsigned width, unsigned tiling, GLuint target)
{
if (_mesa_is_format_compressed(format)) {
return get_aligned_compressed_row_stride(format, width, rmesa->texture_compressed_row_align);
} else {
unsigned row_align;
if (!util_is_power_of_two_or_zero(width) || target == GL_TEXTURE_RECTANGLE) {
row_align = rmesa->texture_rect_row_align - 1;
} else if (tiling) {
unsigned tileWidth, tileHeight;
get_tile_size(format, &tileWidth, &tileHeight);
row_align = tileWidth * _mesa_get_format_bytes(format) - 1;
} else {
row_align = rmesa->texture_row_align - 1;
}
return (_mesa_format_row_stride(format, width) + row_align) & ~row_align;
}
}
/**
* Compute sizes and fill in offset and blit information for the given
* image (determined by \p face and \p level).
*
* \param curOffset points to the offset at which the image is to be stored
* and is updated by this function according to the size of the image.
*/
static void compute_tex_image_offset(radeonContextPtr rmesa, radeon_mipmap_tree *mt,
GLuint face, GLuint level, GLuint* curOffset)
{
radeon_mipmap_level *lvl = &mt->levels[level];
GLuint height;
height = util_next_power_of_two(lvl->height);
lvl->rowstride = get_texture_image_row_stride(rmesa, mt->mesaFormat, lvl->width, mt->tilebits, mt->target);
lvl->size = get_texture_image_size(mt->mesaFormat, lvl->rowstride, height, lvl->depth, mt->tilebits);
assert(lvl->size > 0);
lvl->faces[face].offset = *curOffset;
*curOffset += lvl->size;
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s(%p) level %d, face %d: rs:%d %dx%d at %d\n",
__func__, rmesa,
level, face,
lvl->rowstride, lvl->width, height, lvl->faces[face].offset);
}
static void calculate_miptree_layout(radeonContextPtr rmesa, radeon_mipmap_tree *mt)
{
GLuint curOffset, i, face, level;
assert(1 << (mt->numLevels - 1) <= rmesa->glCtx.Const.MaxTextureSize);
curOffset = 0;
for(face = 0; face < mt->faces; face++) {
for(i = 0, level = mt->baseLevel; i < mt->numLevels; i++, level++) {
mt->levels[level].valid = 1;
mt->levels[level].width = minify(mt->width0, i);
mt->levels[level].height = minify(mt->height0, i);
mt->levels[level].depth = minify(mt->depth0, i);
compute_tex_image_offset(rmesa, mt, face, level, &curOffset);
}
}
/* Note the required size in memory */
mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s(%p, %p) total size %d\n",
__func__, rmesa, mt, mt->totalsize);
}
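/* Illustrative note (not from the original source): RADEON_OFFSET_MASK has
 * the form 2^k - 1, so (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK
 * rounds the total size up to the next 2^k-byte boundary.
 */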
/**
* Create a new mipmap tree, calculate its layout and allocate memory.
*/
radeon_mipmap_tree* radeon_miptree_create(radeonContextPtr rmesa,
GLenum target, mesa_format mesaFormat, GLuint baseLevel, GLuint numLevels,
GLuint width0, GLuint height0, GLuint depth0, GLuint tilebits)
{
radeon_mipmap_tree *mt = CALLOC_STRUCT(_radeon_mipmap_tree);
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s(%p) new tree is %p.\n",
__func__, rmesa, mt);
mt->mesaFormat = mesaFormat;
mt->refcount = 1;
mt->target = target;
mt->faces = _mesa_num_tex_faces(target);
mt->baseLevel = baseLevel;
mt->numLevels = numLevels;
mt->width0 = width0;
mt->height0 = height0;
mt->depth0 = depth0;
mt->tilebits = tilebits;
calculate_miptree_layout(rmesa, mt);
mt->bo = radeon_bo_open(rmesa->radeonScreen->bom,
0, mt->totalsize, 1024,
RADEON_GEM_DOMAIN_VRAM,
0);
return mt;
}
void radeon_miptree_reference(radeon_mipmap_tree *mt, radeon_mipmap_tree **ptr)
{
assert(!*ptr);
mt->refcount++;
assert(mt->refcount > 0);
*ptr = mt;
}
void radeon_miptree_unreference(radeon_mipmap_tree **ptr)
{
radeon_mipmap_tree *mt = *ptr;
if (!mt)
return;
assert(mt->refcount > 0);
mt->refcount--;
if (!mt->refcount) {
radeon_bo_unref(mt->bo);
free(mt);
}
*ptr = 0;
}
/**
* Calculate min and max LOD for the given texture object.
* @param[in] tObj texture object whose LOD values to calculate
* @param[out] pminLod minimal LOD
* @param[out] pmaxLod maximal LOD
*/
static void calculate_min_max_lod(struct gl_sampler_object *samp, struct gl_texture_object *tObj,
unsigned *pminLod, unsigned *pmaxLod)
{
int minLod, maxLod;
/* Yes, this looks overly complicated, but it's all needed.
*/
switch (tObj->Target) {
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
case GL_TEXTURE_3D:
case GL_TEXTURE_CUBE_MAP:
if (samp->Attrib.MinFilter == GL_NEAREST || samp->Attrib.MinFilter == GL_LINEAR) {
/* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
*/
minLod = maxLod = tObj->Attrib.BaseLevel;
} else {
minLod = tObj->Attrib.BaseLevel + (GLint)(samp->Attrib.MinLod);
minLod = MAX2(minLod, tObj->Attrib.BaseLevel);
minLod = MIN2(minLod, tObj->Attrib.MaxLevel);
maxLod = tObj->Attrib.BaseLevel + (GLint)(samp->Attrib.MaxLod + 0.5);
maxLod = MIN2(maxLod, tObj->Attrib.MaxLevel);
maxLod = MIN2(maxLod, tObj->Image[0][minLod]->MaxNumLevels - 1 + minLod);
maxLod = MAX2(maxLod, minLod); /* need at least one level */
}
break;
case GL_TEXTURE_RECTANGLE_NV:
case GL_TEXTURE_4D_SGIS:
minLod = maxLod = 0;
break;
default:
return;
}
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s(%p) target %s, min %d, max %d.\n",
__func__, tObj,
_mesa_enum_to_string(tObj->Target),
minLod, maxLod);
/* save these values */
*pminLod = minLod;
*pmaxLod = maxLod;
}
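/* Worked example (illustrative, not from the original source): for a 2D
 * texture with BaseLevel = 2, MaxLevel = 10, MinLod = 1.0 and MaxLod = 3.4,
 * minLod = 2 + 1 = 3 and maxLod = 2 + (GLint)(3.4 + 0.5) = 5, before the
 * clamps against MaxLevel and the image's MaxNumLevels take effect.
 */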
/**
* Checks whether the given miptree can hold the given texture image at the
* given face and level.
*/
GLboolean radeon_miptree_matches_image(radeon_mipmap_tree *mt,
struct gl_texture_image *texImage)
{
radeon_mipmap_level *lvl;
GLuint level = texImage->Level;
if (texImage->TexFormat != mt->mesaFormat)
return GL_FALSE;
lvl = &mt->levels[level];
if (!lvl->valid ||
lvl->width != texImage->Width ||
lvl->height != texImage->Height ||
lvl->depth != texImage->Depth)
return GL_FALSE;
return GL_TRUE;
}
/**
* Checks whether the given miptree has the right format to store the given texture object.
*/
static GLboolean radeon_miptree_matches_texture(radeon_mipmap_tree *mt, struct gl_texture_object *texObj)
{
struct gl_texture_image *firstImage;
unsigned numLevels;
radeon_mipmap_level *mtBaseLevel;
if (texObj->Attrib.BaseLevel < mt->baseLevel)
return GL_FALSE;
mtBaseLevel = &mt->levels[texObj->Attrib.BaseLevel - mt->baseLevel];
firstImage = texObj->Image[0][texObj->Attrib.BaseLevel];
numLevels = MIN2(texObj->_MaxLevel - texObj->Attrib.BaseLevel + 1, firstImage->MaxNumLevels);
if (radeon_is_debug_enabled(RADEON_TEXTURE,RADEON_TRACE)) {
fprintf(stderr, "Checking if miptree %p matches texObj %p\n", mt, texObj);
fprintf(stderr, "target %d vs %d\n", mt->target, texObj->Target);
fprintf(stderr, "format %d vs %d\n", mt->mesaFormat, firstImage->TexFormat);
fprintf(stderr, "numLevels %d vs %d\n", mt->numLevels, numLevels);
fprintf(stderr, "width0 %d vs %d\n", mtBaseLevel->width, firstImage->Width);
fprintf(stderr, "height0 %d vs %d\n", mtBaseLevel->height, firstImage->Height);
fprintf(stderr, "depth0 %d vs %d\n", mtBaseLevel->depth, firstImage->Depth);
if (mt->target == texObj->Target &&
mt->mesaFormat == firstImage->TexFormat &&
mt->numLevels >= numLevels &&
mtBaseLevel->width == firstImage->Width &&
mtBaseLevel->height == firstImage->Height &&
mtBaseLevel->depth == firstImage->Depth) {
fprintf(stderr, "MATCHED\n");
} else {
fprintf(stderr, "NOT MATCHED\n");
}
}
return (mt->target == texObj->Target &&
mt->mesaFormat == firstImage->TexFormat &&
mt->numLevels >= numLevels &&
mtBaseLevel->width == firstImage->Width &&
mtBaseLevel->height == firstImage->Height &&
mtBaseLevel->depth == firstImage->Depth);
}
/**
* Try to allocate a mipmap tree for the given texture object.
* @param[in] rmesa radeon context
* @param[in] t radeon texture object
*/
void radeon_try_alloc_miptree(radeonContextPtr rmesa, radeonTexObj *t)
{
struct gl_texture_object *texObj = &t->base;
struct gl_texture_image *texImg = texObj->Image[0][texObj->Attrib.BaseLevel];
GLuint numLevels;
assert(!t->mt);
if (!texImg) {
radeon_warning("%s(%p) No image in given texture object(%p).\n",
__func__, rmesa, t);
return;
}
numLevels = MIN2(texObj->Attrib.MaxLevel - texObj->Attrib.BaseLevel + 1, texImg->MaxNumLevels);
t->mt = radeon_miptree_create(rmesa, t->base.Target,
texImg->TexFormat, texObj->Attrib.BaseLevel,
numLevels, texImg->Width, texImg->Height,
texImg->Depth, t->tile_bits);
}
GLuint
radeon_miptree_image_offset(radeon_mipmap_tree *mt,
GLuint face, GLuint level)
{
if (mt->target == GL_TEXTURE_CUBE_MAP_ARB)
return (mt->levels[level].faces[face].offset);
else
return mt->levels[level].faces[0].offset;
}
/**
* Ensure that the given image is stored in the given miptree from now on.
*/
static void migrate_image_to_miptree(radeon_mipmap_tree *mt,
radeon_texture_image *image,
int face, int level)
{
radeon_mipmap_level *dstlvl = &mt->levels[level];
unsigned char *dest;
assert(image->mt != mt);
assert(dstlvl->valid);
assert(dstlvl->width == image->base.Base.Width);
assert(dstlvl->height == image->base.Base.Height);
assert(dstlvl->depth == image->base.Base.Depth);
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
"%s miptree %p, image %p, face %d, level %d.\n",
__func__, mt, image, face, level);
radeon_bo_map(mt->bo, GL_TRUE);
dest = mt->bo->ptr + dstlvl->faces[face].offset;
if (image->mt) {
/* Format etc. should match, so we really just need a memcpy().
* In fact, that memcpy() could be done by the hardware in many
* cases, provided that we have a proper memory manager.
*/
assert(mt->mesaFormat == image->base.Base.TexFormat);
radeon_mipmap_level *srclvl = &image->mt->levels[image->base.Base.Level];
assert(image->base.Base.Level == level);
assert(srclvl->size == dstlvl->size);
assert(srclvl->rowstride == dstlvl->rowstride);
radeon_bo_map(image->mt->bo, GL_FALSE);
memcpy(dest,
image->mt->bo->ptr + srclvl->faces[face].offset,
dstlvl->size);
radeon_bo_unmap(image->mt->bo);
radeon_miptree_unreference(&image->mt);
}
radeon_bo_unmap(mt->bo);
radeon_miptree_reference(mt, &image->mt);
}
/**
 * Filter matching miptrees, and select the one holding the most data.
* @param[in] texObj radeon texture object
* @param[in] firstLevel first texture level to check
* @param[in] lastLevel last texture level to check
*/
static radeon_mipmap_tree * get_biggest_matching_miptree(radeonTexObj *texObj,
unsigned firstLevel,
unsigned lastLevel)
{
const unsigned numLevels = lastLevel - firstLevel + 1;
unsigned *mtSizes = calloc(numLevels, sizeof(unsigned));
radeon_mipmap_tree **mts = calloc(numLevels, sizeof(radeon_mipmap_tree *));
unsigned mtCount = 0;
unsigned maxMtIndex = 0;
radeon_mipmap_tree *tmp;
unsigned int level;
int i;
for (level = firstLevel; level <= lastLevel; ++level) {
radeon_texture_image *img = get_radeon_texture_image(texObj->base.Image[0][level]);
unsigned found = 0;
// TODO: why this hack??
if (!img)
break;
if (!img->mt)
continue;
for (i = 0; i < mtCount; ++i) {
if (mts[i] == img->mt) {
found = 1;
mtSizes[i] += img->mt->levels[img->base.Base.Level].size;
break;
}
}
if (!found && radeon_miptree_matches_texture(img->mt, &texObj->base)) {
mtSizes[mtCount] = img->mt->levels[img->base.Base.Level].size;
mts[mtCount] = img->mt;
mtCount++;
}
}
if (mtCount == 0) {
free(mtSizes);
free(mts);
return NULL;
}
for (i = 1; i < mtCount; ++i) {
if (mtSizes[i] > mtSizes[maxMtIndex]) {
maxMtIndex = i;
}
}
tmp = mts[maxMtIndex];
free(mtSizes);
free(mts);
return tmp;
}
/**
* Validate texture mipmap tree.
 * If individual images are stored in different mipmap trees,
 * use the mipmap tree that holds the most of the correct data.
*/
int radeon_validate_texture_miptree(struct gl_context * ctx,
struct gl_sampler_object *samp,
struct gl_texture_object *texObj)
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
radeonTexObj *t = radeon_tex_obj(texObj);
radeon_mipmap_tree *dst_miptree;
if (samp == &texObj->Sampler && (t->validated || t->image_override)) {
return GL_TRUE;
}
calculate_min_max_lod(samp, &t->base, &t->minLod, &t->maxLod);
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s: Validating texture %p now, minLod = %d, maxLod = %d\n",
__func__, texObj ,t->minLod, t->maxLod);
dst_miptree = get_biggest_matching_miptree(t, t->base.Attrib.BaseLevel, t->base._MaxLevel);
radeon_miptree_unreference(&t->mt);
if (!dst_miptree) {
radeon_try_alloc_miptree(rmesa, t);
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s: No matching miptree found, allocated new one %p\n",
__func__, t->mt);
} else {
radeon_miptree_reference(dst_miptree, &t->mt);
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s: Using miptree %p\n", __func__, t->mt);
}
const unsigned faces = _mesa_num_tex_faces(texObj->Target);
unsigned face, level;
radeon_texture_image *img;
/* Validate only the levels that will actually be used during rendering */
for (face = 0; face < faces; ++face) {
for (level = t->minLod; level <= t->maxLod; ++level) {
img = get_radeon_texture_image(texObj->Image[face][level]);
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"Checking image level %d, face %d, mt %p ... ",
level, face, img->mt);
if (img->mt != t->mt && !img->used_as_render_target) {
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"MIGRATING\n");
struct radeon_bo *src_bo = (img->mt) ? img->mt->bo : img->bo;
if (src_bo && radeon_bo_is_referenced_by_cs(src_bo, rmesa->cmdbuf.cs)) {
radeon_firevertices(rmesa);
}
migrate_image_to_miptree(t->mt, img, face, level);
} else
radeon_print(RADEON_TEXTURE, RADEON_TRACE, "OK\n");
}
}
t->validated = GL_TRUE;
return GL_TRUE;
}
uint32_t get_base_teximage_offset(radeonTexObj *texObj)
{
if (!texObj->mt) {
return 0;
} else {
return radeon_miptree_image_offset(texObj->mt, 0, texObj->minLod);
}
}


@ -1,106 +0,0 @@
/*
* Copyright (C) 2008 Nicolai Haehnle.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __RADEON_MIPMAP_TREE_H_
#define __RADEON_MIPMAP_TREE_H_
#include "radeon_common.h"
typedef struct _radeon_mipmap_tree radeon_mipmap_tree;
typedef struct _radeon_mipmap_level radeon_mipmap_level;
typedef struct _radeon_mipmap_image radeon_mipmap_image;
struct _radeon_mipmap_image {
GLuint offset; /** Offset of this image from the start of mipmap tree buffer, in bytes */
};
struct _radeon_mipmap_level {
GLuint width;
GLuint height;
GLuint depth;
GLuint size; /** Size of each image, in bytes */
GLuint rowstride; /** in bytes */
GLuint valid;
radeon_mipmap_image faces[6];
};
/* store the max possible in the miptree */
#define RADEON_MIPTREE_MAX_TEXTURE_LEVELS 15
/**
* A mipmap tree contains texture images in the layout that the hardware
* expects.
*
* The meta-data of mipmap trees is immutable, i.e. you cannot change the
* layout on-the-fly; however, the texture contents (i.e. texels) can be
* changed.
*/
struct _radeon_mipmap_tree {
struct radeon_bo *bo;
GLuint refcount;
GLuint totalsize; /** total size of the miptree, in bytes */
GLenum target; /** GL_TEXTURE_xxx */
GLenum mesaFormat; /** MESA_FORMAT_xxx */
GLuint faces; /** # of faces: 6 for cubemaps, 1 otherwise */
GLuint baseLevel; /** gl_texture_object->baseLevel it was created for */
GLuint numLevels; /** Number of mip levels stored in this mipmap tree */
GLuint width0; /** Width of baseLevel image */
GLuint height0; /** Height of baseLevel image */
GLuint depth0; /** Depth of baseLevel image */
GLuint tilebits; /** RADEON_TXO_xxx_TILE */
radeon_mipmap_level levels[RADEON_MIPTREE_MAX_TEXTURE_LEVELS];
};
void radeon_miptree_reference(radeon_mipmap_tree *mt, radeon_mipmap_tree **ptr);
void radeon_miptree_unreference(radeon_mipmap_tree **ptr);
GLboolean radeon_miptree_matches_image(radeon_mipmap_tree *mt,
struct gl_texture_image *texImage);
void radeon_try_alloc_miptree(radeonContextPtr rmesa, radeonTexObj *t);
GLuint radeon_miptree_image_offset(radeon_mipmap_tree *mt,
GLuint face, GLuint level);
uint32_t get_base_teximage_offset(radeonTexObj *texObj);
unsigned get_texture_image_row_stride(radeonContextPtr rmesa, mesa_format format, unsigned width, unsigned tiling, unsigned target);
unsigned get_texture_image_size(
mesa_format format,
unsigned rowStride,
unsigned height,
unsigned depth,
unsigned tiling);
radeon_mipmap_tree *radeon_miptree_create(radeonContextPtr rmesa,
GLenum target, mesa_format mesaFormat, GLuint baseLevel, GLuint numLevels,
GLuint width0, GLuint height0, GLuint depth0, GLuint tilebits);
#endif /* __RADEON_MIPMAP_TREE_H_ */


@ -1,221 +0,0 @@
/*
* Copyright (C) 2010 Maciej Cencora <m.cencora@gmail.com>
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "stdint.h"
#include "main/bufferobj.h"
#include "main/enums.h"
#include "main/fbobject.h"
#include "main/image.h"
#include "main/readpix.h"
#include "main/state.h"
#include "radeon_common_context.h"
#include "radeon_buffer_objects.h"
#include "radeon_debug.h"
#include "radeon_mipmap_tree.h"
static mesa_format gl_format_and_type_to_mesa_format(GLenum format, GLenum type)
{
switch (format)
{
case GL_RGB:
switch (type) {
case GL_UNSIGNED_SHORT_5_6_5:
return MESA_FORMAT_B5G6R5_UNORM;
case GL_UNSIGNED_SHORT_5_6_5_REV:
return MESA_FORMAT_R5G6B5_UNORM;
}
break;
case GL_RGBA:
switch (type) {
case GL_FLOAT:
return MESA_FORMAT_RGBA_FLOAT32;
case GL_UNSIGNED_SHORT_5_5_5_1:
return MESA_FORMAT_A1B5G5R5_UNORM;
case GL_UNSIGNED_INT_8_8_8_8:
return MESA_FORMAT_A8B8G8R8_UNORM;
case GL_UNSIGNED_BYTE:
case GL_UNSIGNED_INT_8_8_8_8_REV:
return MESA_FORMAT_R8G8B8A8_UNORM;
}
break;
case GL_BGRA:
switch (type) {
case GL_UNSIGNED_SHORT_4_4_4_4:
return MESA_FORMAT_A4R4G4B4_UNORM;
case GL_UNSIGNED_SHORT_4_4_4_4_REV:
return MESA_FORMAT_B4G4R4A4_UNORM;
case GL_UNSIGNED_SHORT_5_5_5_1:
return MESA_FORMAT_A1R5G5B5_UNORM;
case GL_UNSIGNED_SHORT_1_5_5_5_REV:
return MESA_FORMAT_B5G5R5A1_UNORM;
case GL_UNSIGNED_INT_8_8_8_8:
return MESA_FORMAT_A8R8G8B8_UNORM;
case GL_UNSIGNED_BYTE:
case GL_UNSIGNED_INT_8_8_8_8_REV:
return MESA_FORMAT_B8G8R8A8_UNORM;
}
break;
}
return MESA_FORMAT_NONE;
}
static GLboolean
do_blit_readpixels(struct gl_context * ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
const struct radeon_renderbuffer *rrb = radeon_renderbuffer(ctx->ReadBuffer->_ColorReadBuffer);
const mesa_format dst_format = gl_format_and_type_to_mesa_format(format, type);
unsigned dst_rowstride, dst_imagesize, aligned_rowstride, flip_y;
struct radeon_bo *dst_buffer;
GLint dst_x = 0, dst_y = 0;
intptr_t dst_offset;
/* It's not worth it if the number of pixels to copy is really small */
if (width * height < 100) {
return GL_FALSE;
}
if (dst_format == MESA_FORMAT_NONE ||
!radeon->vtbl.check_blit(dst_format, rrb->pitch / rrb->cpp) || !radeon->vtbl.blit) {
return GL_FALSE;
}
if (ctx->_ImageTransferState || ctx->Color.ColorLogicOpEnabled) {
return GL_FALSE;
}
if (pack->SwapBytes || pack->LsbFirst) {
return GL_FALSE;
}
if (pack->RowLength > 0) {
dst_rowstride = pack->RowLength;
} else {
dst_rowstride = width;
}
if (!_mesa_clip_copytexsubimage(ctx, &dst_x, &dst_y, &x, &y, &width, &height)) {
return GL_TRUE;
}
assert(x >= 0 && y >= 0);
aligned_rowstride = get_texture_image_row_stride(radeon, dst_format, dst_rowstride, 0, GL_TEXTURE_2D);
dst_rowstride *= _mesa_get_format_bytes(dst_format);
if (pack->BufferObj && aligned_rowstride != dst_rowstride)
return GL_FALSE;
dst_imagesize = get_texture_image_size(dst_format,
aligned_rowstride,
height, 1, 0);
if (!pack->BufferObj)
{
dst_buffer = radeon_bo_open(radeon->radeonScreen->bom, 0, dst_imagesize, 1024, RADEON_GEM_DOMAIN_GTT, 0);
dst_offset = 0;
}
else
{
dst_buffer = get_radeon_buffer_object(pack->BufferObj)->bo;
dst_offset = (intptr_t)pixels;
}
/* Disable source Y flipping for FBOs */
flip_y = _mesa_is_winsys_fbo(ctx->ReadBuffer);
if (pack->Invert) {
y = rrb->base.Base.Height - height - y;
flip_y = !flip_y;
}
if (radeon->vtbl.blit(ctx,
rrb->bo,
rrb->draw_offset,
rrb->base.Base.Format,
rrb->pitch / rrb->cpp,
rrb->base.Base.Width,
rrb->base.Base.Height,
x,
y,
dst_buffer,
dst_offset,
dst_format,
aligned_rowstride / _mesa_get_format_bytes(dst_format),
width,
height,
0, /* dst_x */
0, /* dst_y */
width,
height,
flip_y))
{
if (!pack->BufferObj)
{
radeon_bo_map(dst_buffer, 0);
copy_rows(pixels, dst_rowstride, dst_buffer->ptr,
aligned_rowstride, height, dst_rowstride);
radeon_bo_unmap(dst_buffer);
radeon_bo_unref(dst_buffer);
}
return GL_TRUE;
}
if (!pack->BufferObj)
radeon_bo_unref(dst_buffer);
return GL_FALSE;
}
void
radeonReadPixels(struct gl_context * ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
radeon_prepare_render(radeon);
if (do_blit_readpixels(ctx, x, y, width, height, format, type, pack, pixels))
return;
/* Update Mesa state before calling _mesa_readpixels().
* XXX this may not be needed since ReadPixels no longer uses the
* span code.
*/
radeon_print(RADEON_FALLBACKS, RADEON_NORMAL,
"Falling back to sw for ReadPixels (format %s, type %s)\n",
_mesa_enum_to_string(format), _mesa_enum_to_string(type));
if (ctx->NewState)
_mesa_update_state(ctx);
_mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);
}
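/* Usage sketch (client-side GL, not driver code): the pack->BufferObj
 * path in do_blit_readpixels() above is the one taken when the app reads
 * into a pixel pack buffer. This assumes the PBO extension is exposed by
 * the driver; the buffer id and sizes are illustrative.
 *
 *   GLuint pbo;
 *   glGenBuffersARB(1, &pbo);
 *   glBindBufferARB(GL_PIXEL_PACK_BUFFER_EXT, pbo);
 *   glBufferDataARB(GL_PIXEL_PACK_BUFFER_EXT, 512 * 512 * 4, NULL,
 *                   GL_STREAM_READ_ARB);
 *   glReadPixels(0, 0, 512, 512, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, 0);
 *
 * With a PBO bound, the 'pixels' argument is a byte offset into the
 * buffer (0 here), which is why do_blit_readpixels() casts it to
 * dst_offset rather than dereferencing it.
 */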

View File

@ -1,217 +0,0 @@
/*
* Copyright © 2008-2009 Maciej Cencora <m.cencora@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Maciej Cencora <m.cencora@gmail.com>
*
*/
#include "radeon_common.h"
#include "radeon_queryobj.h"
#include "radeon_debug.h"
#include "main/queryobj.h"
#include <inttypes.h>
static void radeonQueryGetResult(struct gl_context *ctx, struct gl_query_object *q)
{
struct radeon_query_object *query = (struct radeon_query_object *)q;
uint32_t *result;
int i;
radeon_print(RADEON_STATE, RADEON_VERBOSE,
"%s: query id %d, result %d\n",
__func__, query->Base.Id, (int) query->Base.Result);
radeon_bo_map(query->bo, GL_FALSE);
result = query->bo->ptr;
query->Base.Result = 0;
for (i = 0; i < query->curr_offset/sizeof(uint32_t); ++i) {
query->Base.Result += LE32_TO_CPU(result[i]);
radeon_print(RADEON_STATE, RADEON_TRACE, "result[%d] = %d\n", i, LE32_TO_CPU(result[i]));
}
radeon_bo_unmap(query->bo);
}
static struct gl_query_object * radeonNewQueryObject(struct gl_context *ctx, GLuint id)
{
struct radeon_query_object *query;
query = calloc(1, sizeof(struct radeon_query_object));
query->Base.Id = id;
query->Base.Result = 0;
query->Base.Active = GL_FALSE;
query->Base.Ready = GL_TRUE;
radeon_print(RADEON_STATE, RADEON_VERBOSE,"%s: query id %d\n", __func__, query->Base.Id);
return &query->Base;
}
static void radeonDeleteQuery(struct gl_context *ctx, struct gl_query_object *q)
{
struct radeon_query_object *query = (struct radeon_query_object *)q;
radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __func__, q->Id);
if (query->bo) {
radeon_bo_unref(query->bo);
}
_mesa_delete_query(ctx, q);
}
static void radeonWaitQuery(struct gl_context *ctx, struct gl_query_object *q)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
struct radeon_query_object *query = (struct radeon_query_object *)q;
/* If the cmdbuf with packets for this query hasn't been flushed yet, do it now */
if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs))
ctx->Driver.Flush(ctx, 0);
radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s: query id %d, bo %p, offset %d\n", __func__, q->Id, query->bo, query->curr_offset);
radeonQueryGetResult(ctx, q);
query->Base.Ready = GL_TRUE;
}
static void radeonBeginQuery(struct gl_context *ctx, struct gl_query_object *q)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
struct radeon_query_object *query = (struct radeon_query_object *)q;
radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __func__, q->Id);
assert(radeon->query.current == NULL);
if (radeon->dma.flush)
radeon->dma.flush(&radeon->glCtx);
if (!query->bo) {
query->bo = radeon_bo_open(radeon->radeonScreen->bom, 0, RADEON_QUERY_PAGE_SIZE, RADEON_QUERY_PAGE_SIZE, RADEON_GEM_DOMAIN_GTT, 0);
}
query->curr_offset = 0;
radeon->query.current = query;
radeon->query.queryobj.dirty = GL_TRUE;
radeon->hw.is_dirty = GL_TRUE;
}
void radeonEmitQueryEnd(struct gl_context *ctx)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
struct radeon_query_object *query = radeon->query.current;
if (!query)
return;
if (query->emitted_begin == GL_FALSE)
return;
radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d, bo %p, offset %d\n", __func__, query->Base.Id, query->bo, query->curr_offset);
radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
query->bo,
0, RADEON_GEM_DOMAIN_GTT);
radeon->vtbl.emit_query_finish(radeon);
}
static void radeonEndQuery(struct gl_context *ctx, struct gl_query_object *q)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __func__, q->Id);
if (radeon->dma.flush)
radeon->dma.flush(&radeon->glCtx);
radeonEmitQueryEnd(ctx);
radeon->query.current = NULL;
}
static void radeonCheckQuery(struct gl_context *ctx, struct gl_query_object *q)
{
radeon_print(RADEON_STATE, RADEON_TRACE, "%s: query id %d\n", __func__, q->Id);
#ifdef DRM_RADEON_GEM_BUSY
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
struct radeon_query_object *query = (struct radeon_query_object *)q;
uint32_t domain;
/* Need to perform a flush, as per ARB_occlusion_query spec */
if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs)) {
ctx->Driver.Flush(ctx, 0);
}
if (radeon_bo_is_busy(query->bo, &domain) == 0) {
radeonQueryGetResult(ctx, q);
query->Base.Ready = GL_TRUE;
}
#else
radeonWaitQuery(ctx, q);
#endif
}
void radeonInitQueryObjFunctions(struct dd_function_table *functions)
{
functions->NewQueryObject = radeonNewQueryObject;
functions->DeleteQuery = radeonDeleteQuery;
functions->BeginQuery = radeonBeginQuery;
functions->EndQuery = radeonEndQuery;
functions->CheckQuery = radeonCheckQuery;
functions->WaitQuery = radeonWaitQuery;
}
int radeon_check_query_active(struct gl_context *ctx, struct radeon_state_atom *atom)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
struct radeon_query_object *query = radeon->query.current;
if (!query || query->emitted_begin)
return 0;
return atom->cmd_size;
}
void radeon_emit_queryobj(struct gl_context *ctx, struct radeon_state_atom *atom)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
BATCH_LOCALS(radeon);
int dwords;
dwords = atom->check(ctx, atom);
BEGIN_BATCH(dwords);
OUT_BATCH_TABLE(atom->cmd, dwords);
END_BATCH();
radeon->query.current->emitted_begin = GL_TRUE;
}
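/* Usage sketch (client-side GL, not driver code): the hooks installed by
 * radeonInitQueryObjFunctions() service GL_ARB_occlusion_query traffic
 * along these lines (names per that extension; the id is illustrative):
 *
 *   GLuint q, samples;
 *   glGenQueriesARB(1, &q);
 *   glBeginQueryARB(GL_SAMPLES_PASSED_ARB, q);     // -> radeonBeginQuery
 *   ... draw the geometry being tested ...
 *   glEndQueryARB(GL_SAMPLES_PASSED_ARB);          // -> radeonEndQuery
 *   glGetQueryObjectuivARB(q, GL_QUERY_RESULT_ARB, &samples);
 *                                                  // -> radeonWaitQuery
 */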

View File

@ -1,54 +0,0 @@
/*
* Copyright © 2008 Maciej Cencora <m.cencora@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Maciej Cencora <m.cencora@gmail.com>
*
*/
#include "util/simple_list.h"
#include "radeon_common_context.h"
extern void radeonEmitQueryBegin(struct gl_context *ctx);
extern void radeonEmitQueryEnd(struct gl_context *ctx);
extern void radeonInitQueryObjFunctions(struct dd_function_table *functions);
#define RADEON_QUERY_PAGE_SIZE 4096
int radeon_check_query_active(struct gl_context *ctx, struct radeon_state_atom *atom);
void radeon_emit_queryobj(struct gl_context *ctx, struct radeon_state_atom *atom);
static inline void radeon_init_query_stateobj(radeonContextPtr radeon, int SZ)
{
radeon->query.queryobj.cmd_size = (SZ);
radeon->query.queryobj.cmd = calloc(SZ, sizeof(uint32_t));
radeon->query.queryobj.name = "queryobj";
radeon->query.queryobj.idx = 0;
radeon->query.queryobj.check = radeon_check_query_active;
radeon->query.queryobj.dirty = GL_FALSE;
radeon->query.queryobj.emit = radeon_emit_queryobj;
radeon->hw.max_state_size += (SZ);
insert_at_tail(&radeon->hw.atomlist, &radeon->query.queryobj);
}
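/* Hypothetical caller sketch: a chip backend sizes the query atom for its
 * ZPASS packet during state-atom setup, along the lines of (the dword
 * count here is illustrative, not the real R100/R200 constant):
 *
 *   radeon_init_query_stateobj(&rmesa->radeon, 4);
 */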

File diff suppressed because it is too large

View File

@ -1,8 +0,0 @@
#ifndef RADEON_SANITY_H
#define RADEON_SANITY_H
extern int radeonSanityCmdBuffer( r100ContextPtr rmesa,
int nbox,
drm_clip_rect_t *boxes );
#endif

View File

@ -1,898 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/**
* \file radeon_screen.c
* Screen initialization functions for the Radeon driver.
*
* \author Kevin E. Martin <martin@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
#include <errno.h>
#include "main/glheader.h"
#include "main/mtypes.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/fbobject.h"
#include "util/u_memory.h"
#include "swrast/s_renderbuffer.h"
#include "radeon_chipset.h"
#include "radeon_screen.h"
#include "radeon_common.h"
#include "radeon_common_context.h"
#if defined(RADEON_R100)
#include "radeon_context.h"
#include "radeon_tex.h"
#elif defined(RADEON_R200)
#include "r200_context.h"
#include "r200_tex.h"
#endif
#include "utils.h"
#include "GL/internal/dri_interface.h"
/* Radeon configuration
*/
#include "util/driconf.h"
#define DRI_CONF_COMMAND_BUFFER_SIZE(def,min,max) \
DRI_CONF_OPT_I(command_buffer_size, def, min, max, \
"Size of command buffer (in KB)")
#define DRI_CONF_MAX_TEXTURE_UNITS(def,min,max) \
DRI_CONF_OPT_I(texture_units,def, min, max, \
"Number of texture units used")
#define DRI_CONF_HYPERZ(def) \
DRI_CONF_OPT_B(hyperz, def, "Use HyperZ to boost performance")
#define DRI_CONF_TCL_MODE(def) \
DRI_CONF_OPT_E(tcl_mode, def, 0, 3, \
"TCL mode (Transformation, Clipping, Lighting)", \
DRI_CONF_ENUM(0,"Use software TCL pipeline") \
DRI_CONF_ENUM(1,"Use hardware TCL as first TCL pipeline stage") \
DRI_CONF_ENUM(2,"Bypass the TCL pipeline") \
DRI_CONF_ENUM(3,"Bypass the TCL pipeline with state-based machine code generated on-the-fly"))
#define DRI_CONF_NO_NEG_LOD_BIAS(def) \
DRI_CONF_OPT_B(no_neg_lod_bias, def, "Forbid negative texture LOD bias")
#define DRI_CONF_DEF_MAX_ANISOTROPY(def, min, max) \
DRI_CONF_OPT_F(def_max_anisotropy,def, min, max, \
"Initial maximum value for anisotropic texture filtering")
#if defined(RADEON_R100) /* R100 */
static const driOptionDescription radeon_driconf[] = {
DRI_CONF_SECTION_PERFORMANCE
DRI_CONF_TCL_MODE(DRI_CONF_TCL_CODEGEN)
DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
DRI_CONF_MAX_TEXTURE_UNITS(3,2,3)
DRI_CONF_HYPERZ(false)
DRI_CONF_COMMAND_BUFFER_SIZE(8, 8, 32)
DRI_CONF_SECTION_END
DRI_CONF_SECTION_QUALITY
DRI_CONF_TEXTURE_DEPTH(DRI_CONF_TEXTURE_DEPTH_FB)
DRI_CONF_DEF_MAX_ANISOTROPY(1.0, 1.0, 16.0)
DRI_CONF_NO_NEG_LOD_BIAS(false)
DRI_CONF_COLOR_REDUCTION(DRI_CONF_COLOR_REDUCTION_DITHER)
DRI_CONF_ROUND_MODE(DRI_CONF_ROUND_TRUNC)
DRI_CONF_DITHER_MODE(DRI_CONF_DITHER_XERRORDIFF)
DRI_CONF_SECTION_END
};
#elif defined(RADEON_R200)
static const driOptionDescription radeon_driconf[] = {
DRI_CONF_SECTION_PERFORMANCE
DRI_CONF_TCL_MODE(DRI_CONF_TCL_CODEGEN)
DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
DRI_CONF_MAX_TEXTURE_UNITS(6,2,6)
DRI_CONF_HYPERZ(false)
DRI_CONF_COMMAND_BUFFER_SIZE(8, 8, 32)
DRI_CONF_SECTION_END
DRI_CONF_SECTION_QUALITY
DRI_CONF_TEXTURE_DEPTH(DRI_CONF_TEXTURE_DEPTH_FB)
DRI_CONF_DEF_MAX_ANISOTROPY(1.0, 1.0, 16.0)
DRI_CONF_NO_NEG_LOD_BIAS(false)
DRI_CONF_COLOR_REDUCTION(DRI_CONF_COLOR_REDUCTION_DITHER)
DRI_CONF_ROUND_MODE(DRI_CONF_ROUND_TRUNC)
DRI_CONF_DITHER_MODE(DRI_CONF_DITHER_XERRORDIFF)
DRI_CONF_OPT_F(texture_blend_quality, 1.0, 0.0, 1.0,
"Texture filtering quality vs. speed, AKA “brilinear” texture filtering")
DRI_CONF_SECTION_END
};
#endif
static char *
radeon_driconf_get_xml(const char *driver_name)
{
return driGetOptionsXml(radeon_driconf, ARRAY_SIZE(radeon_driconf));
}
static const __DRIconfigOptionsExtension radeon_config_options = {
.base = { __DRI_CONFIG_OPTIONS, 2 },
.xml = NULL,
.getXml = radeon_driconf_get_xml,
};
static int
radeonGetParam(__DRIscreen *sPriv, int param, void *value)
{
struct drm_radeon_info info = { 0 };
info.value = (uint64_t)(uintptr_t)value;
switch (param) {
case RADEON_PARAM_DEVICE_ID:
info.request = RADEON_INFO_DEVICE_ID;
break;
case RADEON_PARAM_NUM_GB_PIPES:
info.request = RADEON_INFO_NUM_GB_PIPES;
break;
case RADEON_PARAM_NUM_Z_PIPES:
info.request = RADEON_INFO_NUM_Z_PIPES;
break;
case RADEON_INFO_TILING_CONFIG:
info.request = RADEON_INFO_TILING_CONFIG;
break;
default:
return -EINVAL;
}
return drmCommandWriteRead(sPriv->fd, DRM_RADEON_INFO, &info, sizeof(info));
}
#if defined(RADEON_R100)
static const __DRItexBufferExtension radeonTexBufferExtension = {
.base = { __DRI_TEX_BUFFER, 3 },
.setTexBuffer = radeonSetTexBuffer,
.setTexBuffer2 = radeonSetTexBuffer2,
.releaseTexBuffer = NULL,
};
#elif defined(RADEON_R200)
static const __DRItexBufferExtension r200TexBufferExtension = {
.base = { __DRI_TEX_BUFFER, 3 },
.setTexBuffer = r200SetTexBuffer,
.setTexBuffer2 = r200SetTexBuffer2,
.releaseTexBuffer = NULL,
};
#endif
static void
radeonDRI2Flush(__DRIdrawable *drawable)
{
radeonContextPtr rmesa;
rmesa = (radeonContextPtr) drawable->driContextPriv->driverPrivate;
radeonFlush(&rmesa->glCtx, 0);
}
static const struct __DRI2flushExtensionRec radeonFlushExtension = {
.base = { __DRI2_FLUSH, 3 },
.flush = radeonDRI2Flush,
.invalidate = dri2InvalidateDrawable,
};
static __DRIimage *
radeon_create_image_from_name(__DRIscreen *screen,
int width, int height, int format,
int name, int pitch, void *loaderPrivate)
{
__DRIimage *image;
radeonScreenPtr radeonScreen = screen->driverPrivate;
if (name == 0)
return NULL;
image = calloc(1, sizeof *image);
if (image == NULL)
return NULL;
switch (format) {
case __DRI_IMAGE_FORMAT_RGB565:
image->format = MESA_FORMAT_B5G6R5_UNORM;
image->internal_format = GL_RGB;
image->data_type = GL_UNSIGNED_BYTE;
break;
case __DRI_IMAGE_FORMAT_XRGB8888:
image->format = MESA_FORMAT_B8G8R8X8_UNORM;
image->internal_format = GL_RGB;
image->data_type = GL_UNSIGNED_BYTE;
break;
case __DRI_IMAGE_FORMAT_ARGB8888:
image->format = MESA_FORMAT_B8G8R8A8_UNORM;
image->internal_format = GL_RGBA;
image->data_type = GL_UNSIGNED_BYTE;
break;
default:
free(image);
return NULL;
}
image->data = loaderPrivate;
image->cpp = _mesa_get_format_bytes(image->format);
image->width = width;
image->pitch = pitch;
image->height = height;
image->bo = radeon_bo_open(radeonScreen->bom,
(uint32_t)name,
image->pitch * image->height * image->cpp,
0,
RADEON_GEM_DOMAIN_VRAM,
0);
if (image->bo == NULL) {
free(image);
return NULL;
}
return image;
}
static __DRIimage *
radeon_create_image_from_renderbuffer(__DRIcontext *context,
int renderbuffer, void *loaderPrivate)
{
__DRIimage *image;
radeonContextPtr radeon = context->driverPrivate;
struct gl_renderbuffer *rb;
struct radeon_renderbuffer *rrb;
rb = _mesa_lookup_renderbuffer(&radeon->glCtx, renderbuffer);
if (!rb) {
_mesa_error(&radeon->glCtx,
GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
return NULL;
}
rrb = radeon_renderbuffer(rb);
image = calloc(1, sizeof *image);
if (image == NULL)
return NULL;
image->internal_format = rb->InternalFormat;
image->format = rb->Format;
image->cpp = rrb->cpp;
image->data_type = GL_UNSIGNED_BYTE;
image->data = loaderPrivate;
radeon_bo_ref(rrb->bo);
image->bo = rrb->bo;
image->width = rb->Width;
image->height = rb->Height;
image->pitch = rrb->pitch / image->cpp;
return image;
}
static void
radeon_destroy_image(__DRIimage *image)
{
radeon_bo_unref(image->bo);
free(image);
}
static __DRIimage *
radeon_create_image(__DRIscreen *screen,
int width, int height, int format,
unsigned int use,
void *loaderPrivate)
{
__DRIimage *image;
radeonScreenPtr radeonScreen = screen->driverPrivate;
image = calloc(1, sizeof *image);
if (image == NULL)
return NULL;
image->dri_format = format;
switch (format) {
case __DRI_IMAGE_FORMAT_RGB565:
image->format = MESA_FORMAT_B5G6R5_UNORM;
image->internal_format = GL_RGB;
image->data_type = GL_UNSIGNED_BYTE;
break;
case __DRI_IMAGE_FORMAT_XRGB8888:
image->format = MESA_FORMAT_B8G8R8X8_UNORM;
image->internal_format = GL_RGB;
image->data_type = GL_UNSIGNED_BYTE;
break;
case __DRI_IMAGE_FORMAT_ARGB8888:
image->format = MESA_FORMAT_B8G8R8A8_UNORM;
image->internal_format = GL_RGBA;
image->data_type = GL_UNSIGNED_BYTE;
break;
default:
free(image);
return NULL;
}
image->data = loaderPrivate;
image->cpp = _mesa_get_format_bytes(image->format);
image->width = width;
image->height = height;
image->pitch = ((image->cpp * image->width + 255) & ~255) / image->cpp;
image->bo = radeon_bo_open(radeonScreen->bom,
0,
image->pitch * image->height * image->cpp,
0,
RADEON_GEM_DOMAIN_VRAM,
0);
if (image->bo == NULL) {
free(image);
return NULL;
}
return image;
}
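/* Worked example for the 256-byte pitch rounding above (numbers
 * illustrative): width = 100, cpp = 4 gives 100 * 4 = 400 bytes;
 * (400 + 255) & ~255 = 512 bytes; 512 / 4 = 128 pixels of pitch. */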
static GLboolean
radeon_query_image(__DRIimage *image, int attrib, int *value)
{
switch (attrib) {
case __DRI_IMAGE_ATTRIB_STRIDE:
*value = image->pitch * image->cpp;
return GL_TRUE;
case __DRI_IMAGE_ATTRIB_HANDLE:
*value = image->bo->handle;
return GL_TRUE;
case __DRI_IMAGE_ATTRIB_NAME:
radeon_gem_get_kernel_name(image->bo, (uint32_t *) value);
return GL_TRUE;
default:
return GL_FALSE;
}
}
static const __DRIimageExtension radeonImageExtension = {
.base = { __DRI_IMAGE, 1 },
.createImageFromName = radeon_create_image_from_name,
.createImageFromRenderbuffer = radeon_create_image_from_renderbuffer,
.destroyImage = radeon_destroy_image,
.createImage = radeon_create_image,
.queryImage = radeon_query_image
};
static int radeon_set_screen_flags(radeonScreenPtr screen, int device_id)
{
screen->device_id = device_id;
screen->chip_flags = 0;
switch ( device_id ) {
#if defined(RADEON_R100)
case PCI_CHIP_RN50_515E:
case PCI_CHIP_RN50_5969:
return -1;
case PCI_CHIP_RADEON_LY:
case PCI_CHIP_RADEON_LZ:
case PCI_CHIP_RADEON_QY:
case PCI_CHIP_RADEON_QZ:
screen->chip_family = CHIP_FAMILY_RV100;
break;
case PCI_CHIP_RS100_4136:
case PCI_CHIP_RS100_4336:
screen->chip_family = CHIP_FAMILY_RS100;
break;
case PCI_CHIP_RS200_4137:
case PCI_CHIP_RS200_4337:
case PCI_CHIP_RS250_4237:
case PCI_CHIP_RS250_4437:
screen->chip_family = CHIP_FAMILY_RS200;
break;
case PCI_CHIP_RADEON_QD:
case PCI_CHIP_RADEON_QE:
case PCI_CHIP_RADEON_QF:
case PCI_CHIP_RADEON_QG:
/* all original radeons (7200) presumably have a stencil op bug */
screen->chip_family = CHIP_FAMILY_R100;
screen->chip_flags = RADEON_CHIPSET_TCL | RADEON_CHIPSET_BROKEN_STENCIL | RADEON_CHIPSET_DEPTH_ALWAYS_TILED;
break;
case PCI_CHIP_RV200_QW:
case PCI_CHIP_RV200_QX:
case PCI_CHIP_RADEON_LW:
case PCI_CHIP_RADEON_LX:
screen->chip_family = CHIP_FAMILY_RV200;
screen->chip_flags = RADEON_CHIPSET_TCL | RADEON_CHIPSET_DEPTH_ALWAYS_TILED;
break;
#elif defined(RADEON_R200)
case PCI_CHIP_R200_BB:
case PCI_CHIP_R200_QH:
case PCI_CHIP_R200_QL:
case PCI_CHIP_R200_QM:
screen->chip_family = CHIP_FAMILY_R200;
screen->chip_flags = RADEON_CHIPSET_TCL | RADEON_CHIPSET_DEPTH_ALWAYS_TILED;
break;
case PCI_CHIP_RV250_If:
case PCI_CHIP_RV250_Ig:
case PCI_CHIP_RV250_Ld:
case PCI_CHIP_RV250_Lf:
case PCI_CHIP_RV250_Lg:
screen->chip_family = CHIP_FAMILY_RV250;
screen->chip_flags = R200_CHIPSET_YCBCR_BROKEN | RADEON_CHIPSET_TCL | RADEON_CHIPSET_DEPTH_ALWAYS_TILED;
break;
case PCI_CHIP_RV280_4C6E:
case PCI_CHIP_RV280_5960:
case PCI_CHIP_RV280_5961:
case PCI_CHIP_RV280_5962:
case PCI_CHIP_RV280_5964:
case PCI_CHIP_RV280_5965:
case PCI_CHIP_RV280_5C61:
case PCI_CHIP_RV280_5C63:
screen->chip_family = CHIP_FAMILY_RV280;
screen->chip_flags = RADEON_CHIPSET_TCL | RADEON_CHIPSET_DEPTH_ALWAYS_TILED;
break;
case PCI_CHIP_RS300_5834:
case PCI_CHIP_RS300_5835:
case PCI_CHIP_RS350_7834:
case PCI_CHIP_RS350_7835:
screen->chip_family = CHIP_FAMILY_RS300;
screen->chip_flags = RADEON_CHIPSET_DEPTH_ALWAYS_TILED;
break;
#endif
default:
fprintf(stderr, "unknown chip id 0x%x, can't guess.\n",
device_id);
return -1;
}
return 0;
}
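/* Example: device_id 0x5144 (RADEON QD, the original Radeon 7200) hits
 * the PCI_CHIP_RADEON_QD case above, ending up with chip_family =
 * CHIP_FAMILY_R100 plus TCL, the broken-stencil workaround and
 * always-tiled depth. */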
static int
radeonQueryRendererInteger(__DRIscreen *psp, int param,
unsigned int *value)
{
radeonScreenPtr screen = (radeonScreenPtr)psp->driverPrivate;
switch (param) {
case __DRI2_RENDERER_VENDOR_ID:
value[0] = 0x1002;
return 0;
case __DRI2_RENDERER_DEVICE_ID:
value[0] = screen->device_id;
return 0;
case __DRI2_RENDERER_ACCELERATED:
value[0] = 1;
return 0;
case __DRI2_RENDERER_VIDEO_MEMORY: {
struct drm_radeon_gem_info gem_info;
int retval;
memset(&gem_info, 0, sizeof(gem_info));
/* Get GEM info. */
retval = drmCommandWriteRead(psp->fd, DRM_RADEON_GEM_INFO, &gem_info,
sizeof(gem_info));
if (retval) {
fprintf(stderr, "radeon: Failed to get MM info, error number %d\n",
retval);
return -1;
}
/* XXX: Do we want to return vram_size or vram_visible ? */
value[0] = gem_info.vram_size >> 20;
return 0;
}
case __DRI2_RENDERER_UNIFIED_MEMORY_ARCHITECTURE:
value[0] = 0;
return 0;
default:
return driQueryRendererIntegerCommon(psp, param, value);
}
}
static int
radeonQueryRendererString(__DRIscreen *psp, int param, const char **value)
{
radeonScreenPtr screen = (radeonScreenPtr)psp->driverPrivate;
switch (param) {
case __DRI2_RENDERER_VENDOR_ID:
value[0] = radeonVendorString;
return 0;
case __DRI2_RENDERER_DEVICE_ID:
value[0] = radeonGetRendererString(screen);
return 0;
default:
return -1;
}
}
static const __DRI2rendererQueryExtension radeonRendererQueryExtension = {
.base = { __DRI2_RENDERER_QUERY, 1 },
.queryInteger = radeonQueryRendererInteger,
.queryString = radeonQueryRendererString
};
static const __DRIextension *radeon_screen_extensions[] = {
&dri2ConfigQueryExtension.base,
#if defined(RADEON_R100)
&radeonTexBufferExtension.base,
#elif defined(RADEON_R200)
&r200TexBufferExtension.base,
#endif
&radeonFlushExtension.base,
&radeonImageExtension.base,
&radeonRendererQueryExtension.base,
&dri2NoErrorExtension.base,
NULL
};
static radeonScreenPtr
radeonCreateScreen2(__DRIscreen *sPriv)
{
radeonScreenPtr screen;
int ret;
uint32_t device_id = 0;
/* Allocate the private area */
screen = calloc(1, sizeof(*screen));
if ( !screen ) {
fprintf(stderr, "%s: Could not allocate memory for screen structure", __func__);
fprintf(stderr, "leaving here\n");
return NULL;
}
radeon_init_debug();
/* parse information in __driConfigOptions */
driParseOptionInfo (&screen->optionCache, radeon_driconf,
ARRAY_SIZE(radeon_driconf));
screen->chip_flags = 0;
screen->irq = 1;
ret = radeonGetParam(sPriv, RADEON_PARAM_DEVICE_ID, &device_id);
if (ret) {
free( screen );
fprintf(stderr, "drm_radeon_getparam_t (RADEON_PARAM_DEVICE_ID): %d\n", ret);
return NULL;
}
ret = radeon_set_screen_flags(screen, device_id);
if (ret == -1) {
free(screen);
return NULL;
}
if (getenv("RADEON_NO_TCL"))
screen->chip_flags &= ~RADEON_CHIPSET_TCL;
sPriv->extensions = radeon_screen_extensions;
screen->driScreen = sPriv;
screen->bom = radeon_bo_manager_gem_ctor(sPriv->fd);
if (screen->bom == NULL) {
free(screen);
return NULL;
}
return screen;
}
/* Destroy the device specific screen private data struct.
*/
static void
radeonDestroyScreen( __DRIscreen *sPriv )
{
radeonScreenPtr screen = (radeonScreenPtr)sPriv->driverPrivate;
if (!screen)
return;
#ifdef RADEON_BO_TRACK
radeon_tracker_print(&screen->bom->tracker, stderr);
#endif
radeon_bo_manager_gem_dtor(screen->bom);
/* free all option information */
driDestroyOptionInfo (&screen->optionCache);
free( screen );
sPriv->driverPrivate = NULL;
}
/* Initialize the driver specific screen private data.
*/
static GLboolean
radeonInitDriver( __DRIscreen *sPriv )
{
sPriv->driverPrivate = (void *) radeonCreateScreen2( sPriv );
if ( !sPriv->driverPrivate ) {
radeonDestroyScreen( sPriv );
return GL_FALSE;
}
return GL_TRUE;
}
/**
* Create the Mesa framebuffer and renderbuffers for a given window/drawable.
*
* \todo This function (and its interface) will need to be updated to support
* pbuffers.
*/
static GLboolean
radeonCreateBuffer( __DRIscreen *driScrnPriv,
__DRIdrawable *driDrawPriv,
const struct gl_config *mesaVis,
GLboolean isPixmap )
{
radeonScreenPtr screen = (radeonScreenPtr) driScrnPriv->driverPrivate;
const GLboolean swDepth = GL_FALSE;
const GLboolean swAlpha = GL_FALSE;
const GLboolean swAccum = mesaVis->accumRedBits > 0;
const GLboolean swStencil = mesaVis->stencilBits > 0 &&
mesaVis->depthBits != 24;
mesa_format rgbFormat;
struct radeon_framebuffer *rfb;
if (isPixmap)
return GL_FALSE; /* not implemented */
rfb = CALLOC_STRUCT(radeon_framebuffer);
if (!rfb)
return GL_FALSE;
_mesa_initialize_window_framebuffer(&rfb->base, mesaVis);
if (mesaVis->redBits == 5)
rgbFormat =
#if UTIL_ARCH_LITTLE_ENDIAN
MESA_FORMAT_B5G6R5_UNORM;
#else
MESA_FORMAT_R5G6B5_UNORM;
#endif
else if (mesaVis->alphaBits == 0)
rgbFormat =
#if UTIL_ARCH_LITTLE_ENDIAN
MESA_FORMAT_B8G8R8X8_UNORM;
#else
MESA_FORMAT_X8R8G8B8_UNORM;
#endif
else
rgbFormat =
#if UTIL_ARCH_LITTLE_ENDIAN
MESA_FORMAT_B8G8R8A8_UNORM;
#else
MESA_FORMAT_A8R8G8B8_UNORM;
#endif
/* front color renderbuffer */
rfb->color_rb[0] = radeon_create_renderbuffer(rgbFormat, driDrawPriv);
_mesa_attach_and_own_rb(&rfb->base, BUFFER_FRONT_LEFT, &rfb->color_rb[0]->base.Base);
rfb->color_rb[0]->has_surface = 1;
/* back color renderbuffer */
if (mesaVis->doubleBufferMode) {
rfb->color_rb[1] = radeon_create_renderbuffer(rgbFormat, driDrawPriv);
_mesa_attach_and_own_rb(&rfb->base, BUFFER_BACK_LEFT, &rfb->color_rb[1]->base.Base);
rfb->color_rb[1]->has_surface = 1;
}
if (mesaVis->depthBits == 24) {
if (mesaVis->stencilBits == 8) {
struct radeon_renderbuffer *depthStencilRb =
radeon_create_renderbuffer(MESA_FORMAT_Z24_UNORM_S8_UINT, driDrawPriv);
_mesa_attach_and_own_rb(&rfb->base, BUFFER_DEPTH, &depthStencilRb->base.Base);
_mesa_attach_and_reference_rb(&rfb->base, BUFFER_STENCIL, &depthStencilRb->base.Base);
depthStencilRb->has_surface = screen->depthHasSurface;
} else {
/* depth renderbuffer */
struct radeon_renderbuffer *depth =
radeon_create_renderbuffer(MESA_FORMAT_Z24_UNORM_X8_UINT, driDrawPriv);
_mesa_attach_and_own_rb(&rfb->base, BUFFER_DEPTH, &depth->base.Base);
depth->has_surface = screen->depthHasSurface;
}
} else if (mesaVis->depthBits == 16) {
/* just 16-bit depth buffer, no hw stencil */
struct radeon_renderbuffer *depth =
radeon_create_renderbuffer(MESA_FORMAT_Z_UNORM16, driDrawPriv);
_mesa_attach_and_own_rb(&rfb->base, BUFFER_DEPTH, &depth->base.Base);
depth->has_surface = screen->depthHasSurface;
}
_swrast_add_soft_renderbuffers(&rfb->base,
GL_FALSE, /* color */
swDepth,
swStencil,
swAccum,
swAlpha);
driDrawPriv->driverPrivate = (void *) rfb;
return (driDrawPriv->driverPrivate != NULL);
}
static void radeon_cleanup_renderbuffers(struct radeon_framebuffer *rfb)
{
struct radeon_renderbuffer *rb;
rb = rfb->color_rb[0];
if (rb && rb->bo) {
radeon_bo_unref(rb->bo);
rb->bo = NULL;
}
rb = rfb->color_rb[1];
if (rb && rb->bo) {
radeon_bo_unref(rb->bo);
rb->bo = NULL;
}
rb = radeon_get_renderbuffer(&rfb->base, BUFFER_DEPTH);
if (rb && rb->bo) {
radeon_bo_unref(rb->bo);
rb->bo = NULL;
}
}
void
radeonDestroyBuffer(__DRIdrawable *driDrawPriv)
{
struct radeon_framebuffer *rfb;
if (!driDrawPriv)
return;
rfb = (void*)driDrawPriv->driverPrivate;
if (!rfb)
return;
radeon_cleanup_renderbuffers(rfb);
_mesa_reference_framebuffer((struct gl_framebuffer **)(&(driDrawPriv->driverPrivate)), NULL);
}
/**
* This is the driver specific part of the createNewScreen entry point.
* Called when using DRI2.
*
* \return the struct gl_config supported by this driver
*/
static const
__DRIconfig **radeonInitScreen2(__DRIscreen *psp)
{
static const mesa_format formats[3] = {
MESA_FORMAT_B5G6R5_UNORM,
MESA_FORMAT_B8G8R8X8_UNORM,
MESA_FORMAT_B8G8R8A8_UNORM
};
static const GLenum back_buffer_modes[] = {
__DRI_ATTRIB_SWAP_NONE, __DRI_ATTRIB_SWAP_UNDEFINED
};
uint8_t depth_bits[4], stencil_bits[4], msaa_samples_array[1];
int color;
__DRIconfig **configs = NULL;
psp->max_gl_compat_version = 13;
psp->max_gl_es1_version = 11;
if (!radeonInitDriver(psp)) {
return NULL;
}
depth_bits[0] = 0;
stencil_bits[0] = 0;
depth_bits[1] = 16;
stencil_bits[1] = 0;
depth_bits[2] = 24;
stencil_bits[2] = 0;
depth_bits[3] = 24;
stencil_bits[3] = 8;
msaa_samples_array[0] = 0;
for (color = 0; color < ARRAY_SIZE(formats); color++) {
__DRIconfig **new_configs;
new_configs = driCreateConfigs(formats[color],
depth_bits,
stencil_bits,
ARRAY_SIZE(depth_bits),
back_buffer_modes,
ARRAY_SIZE(back_buffer_modes),
msaa_samples_array,
ARRAY_SIZE(msaa_samples_array),
GL_TRUE, GL_FALSE);
configs = driConcatConfigs(configs, new_configs);
}
if (configs == NULL) {
fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
__LINE__);
return NULL;
}
return (const __DRIconfig **)configs;
}
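/* For reference: the loop above asks for 3 color formats x 4
 * depth/stencil pairings x 2 swap behaviors x 1 MSAA count, i.e. up to
 * 24 fbconfigs; driCreateConfigs may drop combinations it considers
 * invalid for a given format. */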
static const struct __DriverAPIRec radeon_driver_api = {
.InitScreen = radeonInitScreen2,
.DestroyScreen = radeonDestroyScreen,
#if defined(RADEON_R200)
.CreateContext = r200CreateContext,
.DestroyContext = r200DestroyContext,
#else
.CreateContext = r100CreateContext,
.DestroyContext = radeonDestroyContext,
#endif
.CreateBuffer = radeonCreateBuffer,
.DestroyBuffer = radeonDestroyBuffer,
.MakeCurrent = radeonMakeCurrent,
.UnbindContext = radeonUnbindContext,
};
static const struct __DRIDriverVtableExtensionRec radeon_vtable = {
.base = { __DRI_DRIVER_VTABLE, 1 },
.vtable = &radeon_driver_api,
};
/* This is the table of extensions that the loader will dlsym() for. */
static const __DRIextension *radeon_driver_extensions[] = {
&driCoreExtension.base,
&driDRI2Extension.base,
&radeon_config_options.base,
&radeon_vtable.base,
NULL
};
#ifdef RADEON_R200
PUBLIC const __DRIextension **__driDriverGetExtensions_r200(void)
{
globalDriverAPI = &radeon_driver_api;
return radeon_driver_extensions;
}
#else
PUBLIC const __DRIextension **__driDriverGetExtensions_radeon(void)
{
globalDriverAPI = &radeon_driver_api;
return radeon_driver_extensions;
}
#endif

View File

@ -1,269 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
#ifndef __RADEON_SCREEN_H__
#define __RADEON_SCREEN_H__
/*
* IMPORTS: these headers contain all the DRI, X and kernel-related
* definitions that we need.
*/
#include <xf86drm.h>
#include <radeon_drm.h>
#include "dri_util.h"
#include "radeon_chipset.h"
#include "radeon_reg.h"
#include "util/xmlconfig.h"
#define DRI_CONF_COLOR_REDUCTION_ROUND 0
#define DRI_CONF_COLOR_REDUCTION_DITHER 1
#define DRI_CONF_COLOR_REDUCTION(def) \
DRI_CONF_OPT_E(color_reduction, def, 0, 1, \
"Initial color reduction method", \
DRI_CONF_ENUM(0, "Round colors") \
DRI_CONF_ENUM(1, "Dither colors"))
#define DRI_CONF_DITHER_XERRORDIFF 0
#define DRI_CONF_DITHER_XERRORDIFFRESET 1
#define DRI_CONF_DITHER_ORDERED 2
#define DRI_CONF_DITHER_MODE(def) \
DRI_CONF_OPT_E(dither_mode, def, 0, 2, \
"Color dithering method", \
DRI_CONF_ENUM(0, "Horizontal error diffusion") \
DRI_CONF_ENUM(1, "Horizontal error diffusion, reset error at line start") \
DRI_CONF_ENUM(2, "Ordered 2D color dithering"))
#define DRI_CONF_ROUND_TRUNC 0
#define DRI_CONF_ROUND_ROUND 1
#define DRI_CONF_ROUND_MODE(def) \
DRI_CONF_OPT_E(round_mode, def, 0, 1, \
"Color rounding method", \
DRI_CONF_ENUM(0, "Round color components downward") \
DRI_CONF_ENUM(1, "Round to nearest color"))
#define DRI_CONF_FTHROTTLE_BUSY 0
#define DRI_CONF_FTHROTTLE_USLEEPS 1
#define DRI_CONF_FTHROTTLE_IRQS 2
#define DRI_CONF_FTHROTTLE_MODE(def) \
DRI_CONF_OPT_E(fthrottle_mode, def, 0, 2, \
"Method to limit rendering latency", \
DRI_CONF_ENUM(0, "Busy waiting for the graphics hardware") \
DRI_CONF_ENUM(1, "Sleep for brief intervals while waiting for the graphics hardware") \
DRI_CONF_ENUM(2, "Let the graphics hardware emit a software interrupt and sleep"))
#define DRI_CONF_TEXTURE_DEPTH_FB 0
#define DRI_CONF_TEXTURE_DEPTH_32 1
#define DRI_CONF_TEXTURE_DEPTH_16 2
#define DRI_CONF_TEXTURE_DEPTH_FORCE_16 3
#define DRI_CONF_TEXTURE_DEPTH(def) \
DRI_CONF_OPT_E(texture_depth, def, 0, 3, \
"Texture color depth", \
DRI_CONF_ENUM(0, "Prefer frame buffer color depth") \
DRI_CONF_ENUM(1, "Prefer 32 bits per texel") \
DRI_CONF_ENUM(2, "Prefer 16 bits per texel") \
DRI_CONF_ENUM(3, "Force 16 bits per texel"))
#define DRI_CONF_TCL_SW 0
#define DRI_CONF_TCL_PIPELINED 1
#define DRI_CONF_TCL_VTXFMT 2
#define DRI_CONF_TCL_CODEGEN 3
typedef struct {
drm_handle_t handle; /* Handle to the DRM region */
drmSize size; /* Size of the DRM region */
drmAddress map; /* Mapping of the DRM region */
} radeonRegionRec, *radeonRegionPtr;
typedef struct radeon_screen {
int chip_family;
int chip_flags;
int cpp;
int card_type;
int device_id; /* PCI ID */
int AGPMode;
unsigned int irq; /* IRQ number (0 means none) */
unsigned int fbLocation;
unsigned int frontOffset;
unsigned int frontPitch;
unsigned int backOffset;
unsigned int backPitch;
unsigned int depthOffset;
unsigned int depthPitch;
/* Shared texture data */
int numTexHeaps;
int texOffset[RADEON_NR_TEX_HEAPS];
int texSize[RADEON_NR_TEX_HEAPS];
int logTexGranularity[RADEON_NR_TEX_HEAPS];
radeonRegionRec mmio;
radeonRegionRec status;
radeonRegionRec gartTextures;
drmBufMapPtr buffers;
__volatile__ uint32_t *scratch;
__DRIscreen *driScreen;
unsigned int gart_buffer_offset; /* offset in card memory space */
unsigned int gart_texture_offset; /* offset in card memory space */
unsigned int gart_base;
GLboolean depthHasSurface;
/* Configuration cache with default values for all contexts */
driOptionCache optionCache;
int num_gb_pipes;
int num_z_pipes;
struct radeon_bo_manager *bom;
} radeonScreenRec, *radeonScreenPtr;
struct __DRIimageRec {
struct radeon_bo *bo;
GLenum internal_format;
uint32_t dri_format;
GLuint format;
GLenum data_type;
int width, height; /* in pixels */
int pitch; /* in pixels */
int cpp;
void *data;
};
#ifdef RADEON_R200
/* These defines are to ensure that r200_dri's symbols don't conflict with
* radeon's when linked together.
*/
#define get_radeon_buffer_object r200_get_radeon_buffer_object
#define radeonInitBufferObjectFuncs r200_radeonInitBufferObjectFuncs
#define radeonDestroyContext r200_radeonDestroyContext
#define radeonInitContext r200_radeonInitContext
#define radeonMakeCurrent r200_radeonMakeCurrent
#define radeon_prepare_render r200_radeon_prepare_render
#define radeonUnbindContext r200_radeonUnbindContext
#define radeon_update_renderbuffers r200_radeon_update_renderbuffers
#define radeonCountStateEmitSize r200_radeonCountStateEmitSize
#define radeon_draw_buffer r200_radeon_draw_buffer
#define radeonDrawBuffer r200_radeonDrawBuffer
#define radeonEmitState r200_radeonEmitState
#define radeonFinish r200_radeonFinish
#define radeonFlush r200_radeonFlush
#define radeonGetAge r200_radeonGetAge
#define radeonReadBuffer r200_radeonReadBuffer
#define radeonScissor r200_radeonScissor
#define radeonSetCliprects r200_radeonSetCliprects
#define radeonUpdateScissor r200_radeonUpdateScissor
#define radeonUserClear r200_radeonUserClear
#define radeon_viewport r200_radeon_viewport
#define radeon_window_moved r200_radeon_window_moved
#define rcommonBeginBatch r200_rcommonBeginBatch
#define rcommonDestroyCmdBuf r200_rcommonDestroyCmdBuf
#define rcommonEnsureCmdBufSpace r200_rcommonEnsureCmdBufSpace
#define rcommonFlushCmdBuf r200_rcommonFlushCmdBuf
#define rcommonFlushCmdBufLocked r200_rcommonFlushCmdBufLocked
#define rcommonInitCmdBuf r200_rcommonInitCmdBuf
#define radeonAllocDmaRegion r200_radeonAllocDmaRegion
#define radeonEmitVec12 r200_radeonEmitVec12
#define radeonEmitVec16 r200_radeonEmitVec16
#define radeonEmitVec4 r200_radeonEmitVec4
#define radeonEmitVec8 r200_radeonEmitVec8
#define radeonFreeDmaRegions r200_radeonFreeDmaRegions
#define radeon_init_dma r200_radeon_init_dma
#define radeonRefillCurrentDmaRegion r200_radeonRefillCurrentDmaRegion
#define radeonReleaseArrays r200_radeonReleaseArrays
#define radeonReleaseDmaRegions r200_radeonReleaseDmaRegions
#define radeonReturnDmaRegion r200_radeonReturnDmaRegion
#define rcommonAllocDmaLowVerts r200_rcommonAllocDmaLowVerts
#define rcommon_emit_vecfog r200_rcommon_emit_vecfog
#define rcommon_emit_vector r200_rcommon_emit_vector
#define rcommon_flush_last_swtcl_prim r200_rcommon_flush_last_swtcl_prim
#define _radeon_debug_add_indent r200__radeon_debug_add_indent
#define _radeon_debug_remove_indent r200__radeon_debug_remove_indent
#define radeon_init_debug r200_radeon_init_debug
#define _radeon_print r200__radeon_print
#define radeon_create_renderbuffer r200_radeon_create_renderbuffer
#define radeon_fbo_init r200_radeon_fbo_init
#define radeon_renderbuffer_set_bo r200_radeon_renderbuffer_set_bo
#define radeonComputeFogBlendFactor r200_radeonComputeFogBlendFactor
#define radeonInitStaticFogData r200_radeonInitStaticFogData
#define get_base_teximage_offset r200_get_base_teximage_offset
#define get_texture_image_row_stride r200_get_texture_image_row_stride
#define get_texture_image_size r200_get_texture_image_size
#define radeon_miptree_create r200_radeon_miptree_create
#define radeon_miptree_image_offset r200_radeon_miptree_image_offset
#define radeon_miptree_matches_image r200_radeon_miptree_matches_image
#define radeon_miptree_reference r200_radeon_miptree_reference
#define radeon_miptree_unreference r200_radeon_miptree_unreference
#define radeon_try_alloc_miptree r200_radeon_try_alloc_miptree
#define radeon_validate_texture_miptree r200_radeon_validate_texture_miptree
#define radeonReadPixels r200_radeonReadPixels
#define radeon_check_query_active r200_radeon_check_query_active
#define radeonEmitQueryEnd r200_radeonEmitQueryEnd
#define radeon_emit_queryobj r200_radeon_emit_queryobj
#define radeonInitQueryObjFunctions r200_radeonInitQueryObjFunctions
#define radeonInitSpanFuncs r200_radeonInitSpanFuncs
#define copy_rows r200_copy_rows
#define radeonChooseTextureFormat r200_radeonChooseTextureFormat
#define radeonChooseTextureFormat_mesa r200_radeonChooseTextureFormat_mesa
#define radeonFreeTextureImageBuffer r200_radeonFreeTextureImageBuffer
#define radeon_image_target_texture_2d r200_radeon_image_target_texture_2d
#define radeon_init_common_texture_funcs r200_radeon_init_common_texture_funcs
#define radeonIsFormatRenderable r200_radeonIsFormatRenderable
#define radeonNewTextureImage r200_radeonNewTextureImage
#define _radeon_texformat_al88 r200__radeon_texformat_al88
#define _radeon_texformat_argb1555 r200__radeon_texformat_argb1555
#define _radeon_texformat_argb4444 r200__radeon_texformat_argb4444
#define _radeon_texformat_argb8888 r200__radeon_texformat_argb8888
#define _radeon_texformat_rgb565 r200__radeon_texformat_rgb565
#define _radeon_texformat_rgba8888 r200__radeon_texformat_rgba8888
#define radeonCopyTexSubImage r200_radeonCopyTexSubImage
#define get_tile_size r200_get_tile_size
#define tile_image r200_tile_image
#define untile_image r200_untile_image
#define set_re_cntl_d3d r200_set_re_cntl_d3d
#define radeonDestroyBuffer r200_radeonDestroyBuffer
#define radeonVendorString r200_radeonVendorString
#define radeonGetRendererString r200_radeonGetRendererString
#endif
extern void radeonDestroyBuffer(__DRIdrawable *driDrawPriv);
const __DRIextension **__driDriverGetExtensions_radeon(void);
const __DRIextension **__driDriverGetExtensions_r200(void);
#endif /* __RADEON_SCREEN_H__ */

View File

@ -1,155 +0,0 @@
/**************************************************************************
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* Keith Whitwell <keithw@vmware.com>
*
*/
#include "main/glheader.h"
#include "main/texformat.h"
#include "main/renderbuffer.h"
#include "main/samplerobj.h"
#include "main/framebuffer.h"
#include "swrast/swrast.h"
#include "swrast/s_renderbuffer.h"
#include "radeon_common.h"
#include "radeon_span.h"
static void
radeon_renderbuffer_map(struct gl_context *ctx,
struct gl_renderbuffer *rb,
bool flip_y)
{
struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
GLubyte *map;
int stride;
if (!rb || !rrb)
return;
ctx->Driver.MapRenderbuffer(ctx, rb, 0, 0, rb->Width, rb->Height,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT,
&map, &stride, flip_y);
rrb->base.Map = map;
rrb->base.RowStride = stride;
/* No floating point color buffers, use GLubytes */
rrb->base.ColorType = GL_UNSIGNED_BYTE;
}
static void
radeon_renderbuffer_unmap(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
if (!rb || !rrb)
return;
ctx->Driver.UnmapRenderbuffer(ctx, rb);
rrb->base.Map = NULL;
rrb->base.RowStride = 0;
}
static void
radeon_map_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
GLuint i;
radeon_print(RADEON_MEMORY, RADEON_TRACE,
"%s( %p , fb %p )\n",
__func__, ctx, fb);
/* check for render to textures */
for (i = 0; i < BUFFER_COUNT; i++)
radeon_renderbuffer_map(ctx, fb->Attachment[i].Renderbuffer,
fb->FlipY);
if (_mesa_is_front_buffer_drawing(fb))
RADEON_CONTEXT(ctx)->front_buffer_dirty = true;
}
static void
radeon_unmap_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
GLuint i;
radeon_print(RADEON_MEMORY, RADEON_TRACE,
"%s( %p , fb %p)\n",
__func__, ctx, fb);
/* check for render to textures */
for (i = 0; i < BUFFER_COUNT; i++)
radeon_renderbuffer_unmap(ctx, fb->Attachment[i].Renderbuffer);
if (_mesa_is_front_buffer_drawing(fb))
RADEON_CONTEXT(ctx)->front_buffer_dirty = true;
}
static void radeonSpanRenderStart(struct gl_context * ctx)
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
radeon_firevertices(rmesa);
_swrast_map_textures(ctx);
radeon_map_framebuffer(ctx, ctx->DrawBuffer);
if (ctx->ReadBuffer != ctx->DrawBuffer)
radeon_map_framebuffer(ctx, ctx->ReadBuffer);
}
static void radeonSpanRenderFinish(struct gl_context * ctx)
{
_swrast_flush(ctx);
_swrast_unmap_textures(ctx);
radeon_unmap_framebuffer(ctx, ctx->DrawBuffer);
if (ctx->ReadBuffer != ctx->DrawBuffer)
radeon_unmap_framebuffer(ctx, ctx->ReadBuffer);
}
void radeonInitSpanFuncs(struct gl_context * ctx)
{
struct swrast_device_driver *swdd =
_swrast_GetDeviceDriverReference(ctx);
swdd->SpanRenderStart = radeonSpanRenderStart;
swdd->SpanRenderFinish = radeonSpanRenderFinish;
}

View File

@ -1,47 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Keith Whitwell <keithw@vmware.com>
* Kevin E. Martin <martin@valinux.com>
*/
#ifndef __RADEON_SPAN_H__
#define __RADEON_SPAN_H__
extern void radeonInitSpanFuncs(struct gl_context * ctx);
#endif

File diff suppressed because it is too large

View File

@ -1,71 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#ifndef __RADEON_STATE_H__
#define __RADEON_STATE_H__
#include "radeon_context.h"
extern void radeonInitState( r100ContextPtr rmesa );
extern void radeonInitStateFuncs( struct gl_context *ctx );
extern void radeonUpdateMaterial( struct gl_context *ctx );
extern void radeonUpdateWindow( struct gl_context *ctx );
extern void radeonUpdateDrawBuffer( struct gl_context *ctx );
extern void radeonUploadTexMatrix( r100ContextPtr rmesa,
int unit, GLboolean swapcols );
extern GLboolean r100ValidateBuffers(struct gl_context *ctx);
extern GLboolean radeonValidateState( struct gl_context *ctx );
extern void radeonFallback( struct gl_context *ctx, GLuint bit, GLboolean mode );
#define FALLBACK( rmesa, bit, mode ) do { \
if ( 0 ) fprintf( stderr, "FALLBACK in %s: #%d=%d\n", \
__func__, bit, mode ); \
radeonFallback( &rmesa->radeon.glCtx, bit, mode ); \
} while (0)
#define MODEL_PROJ 0
#define MODEL 1
#define MODEL_IT 2
#define TEXMAT_0 3
#define TEXMAT_1 4
#define TEXMAT_2 5
#endif

View File

@ -1,928 +0,0 @@
/*
* Copyright 2000, 2001 VA Linux Systems Inc., Fremont, California.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Keith Whitwell <keithw@vmware.com>
*/
#include "main/errors.h"
#include "main/glheader.h"
#include "main/api_arrayelt.h"
#include "swrast/swrast.h"
#include "vbo/vbo.h"
#include "tnl/t_pipeline.h"
#include "swrast_setup/swrast_setup.h"
#include "radeon_context.h"
#include "radeon_mipmap_tree.h"
#include "radeon_ioctl.h"
#include "radeon_state.h"
#include "radeon_queryobj.h"
#include "../r200/r200_reg.h"
#include "util/driconf.h"
/* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in
* 1.3 cmdbuffers allow all previous state to be updated as well as
* the tcl scalar and vector areas.
*/
static struct {
int start;
int len;
const char *name;
} packet[RADEON_MAX_STATE_PACKETS] = {
{RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
{RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
{RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
{RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
{RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
{RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
{RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
{RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
{RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
{RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
{RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
{RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
{RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
{RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
{RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
{RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
{RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
{RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
{RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
{RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
{RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
"RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
{R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
{R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
{R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
{R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
{R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
{R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
{R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
{R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
{R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
{R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
{R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
{R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
{R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
{R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
{R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
{R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
{R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
{R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
{R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
{R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
{R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
{R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
{R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
{R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
{R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
"R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
{R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
{R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
{R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
{R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
{R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
{R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
{R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
"R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */
{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
{R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
{R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
{R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
{R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
{R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
{R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
{R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
{RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
{RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
{RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
{R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
{R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
{RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
{RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
{RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
{RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
{R200_PP_TXCBLEND_8, 32, "R200_PP_AFS_0"}, /* 85 */
{R200_PP_TXCBLEND_0, 32, "R200_PP_AFS_1"},
{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
{R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
{R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
{R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
{R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
{R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
{R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"},
};
/* =============================================================
* State initialization
*/
static int cmdpkt( r100ContextPtr rmesa, int id )
{
return CP_PACKET0(packet[id].start, packet[id].len - 1);
}
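/* For reference (illustrative, not part of the original driver):
 * cmdpkt(rmesa, RADEON_EMIT_PP_MISC) looks up packet[0] above and yields
 * CP_PACKET0(RADEON_PP_MISC, 6), i.e. a Type-0 CP packet writing the
 * 7 consecutive registers starting at RADEON_PP_MISC (the count field
 * holds len - 1).
 */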
static int cmdvec( int offset, int stride, int count )
{
drm_radeon_cmd_header_t h;
h.i = 0;
h.vectors.cmd_type = RADEON_CMD_VECTORS;
h.vectors.offset = offset;
h.vectors.stride = stride;
h.vectors.count = count;
return h.i;
}
static int cmdscl( int offset, int stride, int count )
{
drm_radeon_cmd_header_t h;
h.i = 0;
h.scalars.cmd_type = RADEON_CMD_SCALARS;
h.scalars.offset = offset;
h.scalars.stride = stride;
h.scalars.count = count;
return h.i;
}
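/* Sketch (illustrative only) of how these headers round-trip: the dword
 * built by cmdvec() packs four byte-wide fields that OUT_VEC() below
 * unpacks again.
 *
 *    drm_radeon_cmd_header_t h;
 *    h.i = cmdvec(RADEON_VS_FOG_PARAM_ADDR, 1, 4);
 *    // h.vectors.offset == RADEON_VS_FOG_PARAM_ADDR  (TCL vector address)
 *    // h.vectors.stride == 1                         (octwords per vector)
 *    // h.vectors.count  == 4                         (dwords of payload)
 */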
#define CHECK( NM, FLAG, ADD ) \
static int check_##NM( struct gl_context *ctx, struct radeon_state_atom *atom ) \
{ \
return FLAG ? atom->cmd_size + (ADD) : 0; \
}
#define TCL_CHECK( NM, FLAG, ADD ) \
static int check_##NM( struct gl_context *ctx, struct radeon_state_atom *atom ) \
{ \
r100ContextPtr rmesa = R100_CONTEXT(ctx); \
return (!rmesa->radeon.TclFallback && (FLAG)) ? atom->cmd_size + (ADD) : 0; \
}
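/* For reference (sketch): CHECK( fog_add4, ctx->Fog.Enabled, 4 ) below
 * expands roughly to
 *
 *    static int check_fog_add4(struct gl_context *ctx,
 *                              struct radeon_state_atom *atom)
 *    {
 *       return ctx->Fog.Enabled ? atom->cmd_size + 4 : 0;
 *    }
 *
 * so each atom's check() reports the dwords it will emit, or 0 when the
 * atom is currently inactive.
 */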
CHECK( always, GL_TRUE, 0 )
CHECK( always_add2, GL_TRUE, 2 )
CHECK( always_add4, GL_TRUE, 4 )
CHECK( tex0_mm, GL_TRUE, 3 )
CHECK( tex1_mm, GL_TRUE, 3 )
/* need this for the cubic_map on disabled unit 2 bug, maybe r100 only? */
CHECK( tex2_mm, GL_TRUE, 3 )
CHECK( cube0_mm, (ctx->Texture.Unit[0]._Current && ctx->Texture.Unit[0]._Current->Target == GL_TEXTURE_CUBE_MAP), 2 + 4*5 - CUBE_STATE_SIZE )
CHECK( cube1_mm, (ctx->Texture.Unit[1]._Current && ctx->Texture.Unit[1]._Current->Target == GL_TEXTURE_CUBE_MAP), 2 + 4*5 - CUBE_STATE_SIZE )
CHECK( cube2_mm, (ctx->Texture.Unit[2]._Current && ctx->Texture.Unit[2]._Current->Target == GL_TEXTURE_CUBE_MAP), 2 + 4*5 - CUBE_STATE_SIZE )
CHECK( fog_add4, ctx->Fog.Enabled, 4 )
TCL_CHECK( tcl_add4, GL_TRUE, 4 )
TCL_CHECK( tcl_tex0_add4, ctx->Texture.Unit[0]._Current, 4 )
TCL_CHECK( tcl_tex1_add4, ctx->Texture.Unit[1]._Current, 4 )
TCL_CHECK( tcl_tex2_add4, ctx->Texture.Unit[2]._Current, 4 )
TCL_CHECK( tcl_lighting, ctx->Light.Enabled, 0 )
TCL_CHECK( tcl_lighting_add4, ctx->Light.Enabled, 4 )
TCL_CHECK( tcl_eyespace_or_lighting_add4, ctx->_NeedEyeCoords || ctx->Light.Enabled, 4 )
TCL_CHECK( tcl_lit0_add6, ctx->Light.Enabled && ctx->Light.Light[0].Enabled, 6 )
TCL_CHECK( tcl_lit1_add6, ctx->Light.Enabled && ctx->Light.Light[1].Enabled, 6 )
TCL_CHECK( tcl_lit2_add6, ctx->Light.Enabled && ctx->Light.Light[2].Enabled, 6 )
TCL_CHECK( tcl_lit3_add6, ctx->Light.Enabled && ctx->Light.Light[3].Enabled, 6 )
TCL_CHECK( tcl_lit4_add6, ctx->Light.Enabled && ctx->Light.Light[4].Enabled, 6 )
TCL_CHECK( tcl_lit5_add6, ctx->Light.Enabled && ctx->Light.Light[5].Enabled, 6 )
TCL_CHECK( tcl_lit6_add6, ctx->Light.Enabled && ctx->Light.Light[6].Enabled, 6 )
TCL_CHECK( tcl_lit7_add6, ctx->Light.Enabled && ctx->Light.Light[7].Enabled, 6 )
TCL_CHECK( tcl_ucp0_add4, (ctx->Transform.ClipPlanesEnabled & 0x1), 4 )
TCL_CHECK( tcl_ucp1_add4, (ctx->Transform.ClipPlanesEnabled & 0x2), 4 )
TCL_CHECK( tcl_ucp2_add4, (ctx->Transform.ClipPlanesEnabled & 0x4), 4 )
TCL_CHECK( tcl_ucp3_add4, (ctx->Transform.ClipPlanesEnabled & 0x8), 4 )
TCL_CHECK( tcl_ucp4_add4, (ctx->Transform.ClipPlanesEnabled & 0x10), 4 )
TCL_CHECK( tcl_ucp5_add4, (ctx->Transform.ClipPlanesEnabled & 0x20), 4 )
TCL_CHECK( tcl_eyespace_or_fog_add4, ctx->_NeedEyeCoords || ctx->Fog.Enabled, 4 )
CHECK( txr0, (ctx->Texture.Unit[0]._Current && ctx->Texture.Unit[0]._Current->Target == GL_TEXTURE_RECTANGLE), 0 )
CHECK( txr1, (ctx->Texture.Unit[1]._Current && ctx->Texture.Unit[1]._Current->Target == GL_TEXTURE_RECTANGLE), 0 )
CHECK( txr2, (ctx->Texture.Unit[2]._Current && ctx->Texture.Unit[2]._Current->Target == GL_TEXTURE_RECTANGLE), 0 )
#define OUT_VEC(hdr, data) do { \
drm_radeon_cmd_header_t h; \
h.i = hdr; \
OUT_BATCH(CP_PACKET0(RADEON_SE_TCL_STATE_FLUSH, 0)); \
OUT_BATCH(0); \
OUT_BATCH(CP_PACKET0(R200_SE_TCL_VECTOR_INDX_REG, 0)); \
OUT_BATCH(h.vectors.offset | (h.vectors.stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); \
OUT_BATCH(CP_PACKET0_ONE(R200_SE_TCL_VECTOR_DATA_REG, h.vectors.count - 1)); \
OUT_BATCH_TABLE((data), h.vectors.count); \
} while(0)
#define OUT_SCL(hdr, data) do { \
drm_radeon_cmd_header_t h; \
h.i = hdr; \
OUT_BATCH(CP_PACKET0(R200_SE_TCL_SCALAR_INDX_REG, 0)); \
OUT_BATCH((h.scalars.offset) | (h.scalars.stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); \
OUT_BATCH(CP_PACKET0_ONE(R200_SE_TCL_SCALAR_DATA_REG, h.scalars.count - 1)); \
OUT_BATCH_TABLE((data), h.scalars.count); \
} while(0)
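/* Size sanity check (illustrative): OUT_VEC emits 5 + count dwords
 * (flush packet + index packet + data header + payload), so a 4-dword
 * vector atom such as FOG costs 9 dwords -- matching check_fog_add4()
 * above, assuming the atom's cmd_size counts the header plus 4 data
 * dwords.  OUT_SCL likewise emits 3 + count dwords.
 */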
static void scl_emit(struct gl_context *ctx, struct radeon_state_atom *atom)
{
r100ContextPtr r100 = R100_CONTEXT(ctx);
BATCH_LOCALS(&r100->radeon);
uint32_t dwords = atom->check(ctx, atom);
BEGIN_BATCH(dwords);
OUT_SCL(atom->cmd[0], atom->cmd+1);
END_BATCH();
}
static void vec_emit(struct gl_context *ctx, struct radeon_state_atom *atom)
{
r100ContextPtr r100 = R100_CONTEXT(ctx);
BATCH_LOCALS(&r100->radeon);
uint32_t dwords = atom->check(ctx, atom);
BEGIN_BATCH(dwords);
OUT_VEC(atom->cmd[0], atom->cmd+1);
END_BATCH();
}
static void lit_emit(struct gl_context *ctx, struct radeon_state_atom *atom)
{
r100ContextPtr r100 = R100_CONTEXT(ctx);
BATCH_LOCALS(&r100->radeon);
uint32_t dwords = atom->check(ctx, atom);
BEGIN_BATCH(dwords);
OUT_VEC(atom->cmd[LIT_CMD_0], atom->cmd+1);
OUT_SCL(atom->cmd[LIT_CMD_1], atom->cmd+LIT_CMD_1+1);
END_BATCH();
}
static int check_always_ctx( struct gl_context *ctx, struct radeon_state_atom *atom)
{
r100ContextPtr r100 = R100_CONTEXT(ctx);
struct radeon_renderbuffer *rrb, *drb;
uint32_t dwords;
rrb = radeon_get_colorbuffer(&r100->radeon);
if (!rrb || !rrb->bo) {
return 0;
}
drb = radeon_get_depthbuffer(&r100->radeon);
dwords = 10;
if (drb)
dwords += 6;
if (rrb)
dwords += 8;
return dwords;
}
static void ctx_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
{
r100ContextPtr r100 = R100_CONTEXT(ctx);
BATCH_LOCALS(&r100->radeon);
struct radeon_renderbuffer *rrb, *drb;
uint32_t cbpitch = 0;
uint32_t zbpitch = 0;
uint32_t dwords = atom->check(ctx, atom);
uint32_t depth_fmt;
rrb = radeon_get_colorbuffer(&r100->radeon);
if (!rrb || !rrb->bo) {
fprintf(stderr, "no rrb\n");
return;
}
atom->cmd[CTX_RB3D_CNTL] &= ~(0xf << 10);
if (rrb->cpp == 4)
atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_ARGB8888;
else switch (rrb->base.Base.Format) {
case MESA_FORMAT_B5G6R5_UNORM:
case MESA_FORMAT_R5G6B5_UNORM:
atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_RGB565;
break;
case MESA_FORMAT_B4G4R4A4_UNORM:
case MESA_FORMAT_A4R4G4B4_UNORM:
atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_ARGB4444;
break;
case MESA_FORMAT_B5G5R5A1_UNORM:
case MESA_FORMAT_A1R5G5B5_UNORM:
atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_ARGB1555;
break;
default:
_mesa_problem(ctx, "unexpected format in ctx_emit_cs()");
}
cbpitch = (rrb->pitch / rrb->cpp);
if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE)
cbpitch |= R200_COLOR_TILE_ENABLE;
if (rrb->bo->flags & RADEON_BO_FLAGS_MICRO_TILE)
cbpitch |= RADEON_COLOR_MICROTILE_ENABLE;
drb = radeon_get_depthbuffer(&r100->radeon);
if (drb) {
zbpitch = (drb->pitch / drb->cpp);
if (drb->cpp == 4)
depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
else
depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
atom->cmd[CTX_RB3D_ZSTENCILCNTL] &= ~RADEON_DEPTH_FORMAT_MASK;
atom->cmd[CTX_RB3D_ZSTENCILCNTL] |= depth_fmt;
}
BEGIN_BATCH(dwords);
/* In the CS case we need to split this up */
OUT_BATCH(CP_PACKET0(packet[0].start, 3));
OUT_BATCH_TABLE((atom->cmd + 1), 4);
if (drb) {
OUT_BATCH(CP_PACKET0(RADEON_RB3D_DEPTHOFFSET, 0));
OUT_BATCH_RELOC(drb->bo, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);
OUT_BATCH(CP_PACKET0(RADEON_RB3D_DEPTHPITCH, 0));
OUT_BATCH(zbpitch);
}
OUT_BATCH(CP_PACKET0(RADEON_RB3D_ZSTENCILCNTL, 0));
OUT_BATCH(atom->cmd[CTX_RB3D_ZSTENCILCNTL]);
OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 1));
OUT_BATCH(atom->cmd[CTX_PP_CNTL]);
OUT_BATCH(atom->cmd[CTX_RB3D_CNTL]);
if (rrb) {
OUT_BATCH(CP_PACKET0(RADEON_RB3D_COLOROFFSET, 0));
OUT_BATCH_RELOC(rrb->bo, rrb->draw_offset, 0, RADEON_GEM_DOMAIN_VRAM, 0);
OUT_BATCH(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
OUT_BATCH_RELOC(rrb->bo, cbpitch, 0, RADEON_GEM_DOMAIN_VRAM, 0);
}
// if (atom->cmd_size == CTX_STATE_SIZE_NEWDRM) {
// OUT_BATCH_TABLE((atom->cmd + 14), 4);
// }
END_BATCH();
BEGIN_BATCH(4);
OUT_BATCH(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
OUT_BATCH(0);
OUT_BATCH(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
if (rrb) {
OUT_BATCH(((rrb->base.Base.Width - 1) << RADEON_RE_WIDTH_SHIFT) |
((rrb->base.Base.Height - 1) << RADEON_RE_HEIGHT_SHIFT));
} else {
OUT_BATCH(0);
}
END_BATCH();
}
static void cube_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
{
r100ContextPtr r100 = R100_CONTEXT(ctx);
BATCH_LOCALS(&r100->radeon);
uint32_t dwords = atom->check(ctx, atom);
int i = atom->idx, j;
radeonTexObj *t = r100->state.texture.unit[i].texobj;
radeon_mipmap_level *lvl;
uint32_t base_reg;
if (!ctx->Texture.Unit[i]._Current ||
ctx->Texture.Unit[i]._Current->Target != GL_TEXTURE_CUBE_MAP)
return;
if (!t)
return;
if (!t->mt)
return;
switch(i) {
case 1: base_reg = RADEON_PP_CUBIC_OFFSET_T1_0; break;
case 2: base_reg = RADEON_PP_CUBIC_OFFSET_T2_0; break;
default:
case 0: base_reg = RADEON_PP_CUBIC_OFFSET_T0_0; break;
}
BEGIN_BATCH(dwords);
OUT_BATCH_TABLE(atom->cmd, 2);
lvl = &t->mt->levels[0];
for (j = 0; j < 5; j++) {
OUT_BATCH(CP_PACKET0(base_reg + (4 * j), 0));
OUT_BATCH_RELOC(t->mt->bo, lvl->faces[j].offset,
RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
}
END_BATCH();
}
static void tex_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
{
r100ContextPtr r100 = R100_CONTEXT(ctx);
BATCH_LOCALS(&r100->radeon);
uint32_t dwords = atom->cmd_size;
int i = atom->idx;
radeonTexObj *t = r100->state.texture.unit[i].texobj;
radeon_mipmap_level *lvl;
   int hastexture = t && (t->mt || t->bo);
dwords += 1;
if (hastexture)
dwords += 2;
else
dwords -= 2;
BEGIN_BATCH(dwords);
OUT_BATCH(CP_PACKET0(RADEON_PP_TXFILTER_0 + (24 * i), 1));
OUT_BATCH_TABLE((atom->cmd + 1), 2);
if (hastexture) {
OUT_BATCH(CP_PACKET0(RADEON_PP_TXOFFSET_0 + (24 * i), 0));
if (t->mt && !t->image_override) {
if (ctx->Texture.Unit[i]._Current &&
ctx->Texture.Unit[i]._Current->Target == GL_TEXTURE_CUBE_MAP) {
lvl = &t->mt->levels[t->minLod];
OUT_BATCH_RELOC(t->mt->bo, lvl->faces[5].offset,
RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
} else {
OUT_BATCH_RELOC(t->mt->bo,
get_base_teximage_offset(t) | t->tile_bits,
RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
}
} else {
if (t->bo)
OUT_BATCH_RELOC(t->bo, t->tile_bits,
RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0);
}
}
OUT_BATCH(CP_PACKET0(RADEON_PP_TXCBLEND_0 + (i * 24), 1));
OUT_BATCH_TABLE((atom->cmd+4), 2);
OUT_BATCH(CP_PACKET0(RADEON_PP_BORDER_COLOR_0 + (i * 4), 0));
OUT_BATCH((atom->cmd[TEX_PP_BORDER_COLOR]));
END_BATCH();
}
/* Initialize the context's hardware state.
*/
void radeonInitState( r100ContextPtr rmesa )
{
struct gl_context *ctx = &rmesa->radeon.glCtx;
GLuint i;
rmesa->radeon.Fallback = 0;
rmesa->radeon.hw.max_state_size = 0;
#define ALLOC_STATE_IDX( ATOM, CHK, SZ, NM, FLAG, IDX ) \
do { \
rmesa->hw.ATOM.cmd_size = SZ; \
rmesa->hw.ATOM.cmd = (GLuint *) calloc(SZ, sizeof(int)); \
rmesa->hw.ATOM.lastcmd = (GLuint *) calloc(SZ, sizeof(int)); \
rmesa->hw.ATOM.name = NM; \
rmesa->hw.ATOM.is_tcl = FLAG; \
rmesa->hw.ATOM.check = check_##CHK; \
rmesa->hw.ATOM.dirty = GL_TRUE; \
rmesa->hw.ATOM.idx = IDX; \
rmesa->radeon.hw.max_state_size += SZ * sizeof(int); \
} while (0)
#define ALLOC_STATE( ATOM, CHK, SZ, NM, FLAG ) \
ALLOC_STATE_IDX(ATOM, CHK, SZ, NM, FLAG, 0)
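/* For reference (sketch): ALLOC_STATE( msc, always, MSC_STATE_SIZE,
 * "MSC/misc", 0 ) below expands roughly to
 *
 *    rmesa->hw.msc.cmd_size = MSC_STATE_SIZE;
 *    rmesa->hw.msc.cmd      = (GLuint *) calloc(MSC_STATE_SIZE, sizeof(int));
 *    rmesa->hw.msc.lastcmd  = (GLuint *) calloc(MSC_STATE_SIZE, sizeof(int));
 *    rmesa->hw.msc.name     = "MSC/misc";
 *    rmesa->hw.msc.is_tcl   = 0;
 *    rmesa->hw.msc.check    = check_always;
 *    rmesa->hw.msc.dirty    = GL_TRUE;
 *    rmesa->hw.msc.idx      = 0;
 *    rmesa->radeon.hw.max_state_size += MSC_STATE_SIZE * sizeof(int);
 */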
/* Allocate state buffers:
*/
ALLOC_STATE( ctx, always_add4, CTX_STATE_SIZE, "CTX/context", 0 );
rmesa->hw.ctx.emit = ctx_emit_cs;
rmesa->hw.ctx.check = check_always_ctx;
ALLOC_STATE( lin, always, LIN_STATE_SIZE, "LIN/line", 0 );
ALLOC_STATE( msk, always, MSK_STATE_SIZE, "MSK/mask", 0 );
ALLOC_STATE( vpt, always, VPT_STATE_SIZE, "VPT/viewport", 0 );
ALLOC_STATE( set, always, SET_STATE_SIZE, "SET/setup", 0 );
ALLOC_STATE( msc, always, MSC_STATE_SIZE, "MSC/misc", 0 );
ALLOC_STATE( zbs, always, ZBS_STATE_SIZE, "ZBS/zbias", 0 );
ALLOC_STATE( tcl, always, TCL_STATE_SIZE, "TCL/tcl", 1 );
ALLOC_STATE( mtl, tcl_lighting, MTL_STATE_SIZE, "MTL/material", 1 );
ALLOC_STATE( grd, always_add2, GRD_STATE_SIZE, "GRD/guard-band", 1 );
ALLOC_STATE( fog, fog_add4, FOG_STATE_SIZE, "FOG/fog", 1 );
ALLOC_STATE( glt, tcl_lighting_add4, GLT_STATE_SIZE, "GLT/light-global", 1 );
ALLOC_STATE( eye, tcl_lighting_add4, EYE_STATE_SIZE, "EYE/eye-vector", 1 );
ALLOC_STATE_IDX( tex[0], tex0_mm, TEX_STATE_SIZE, "TEX/tex-0", 0, 0);
ALLOC_STATE_IDX( tex[1], tex1_mm, TEX_STATE_SIZE, "TEX/tex-1", 0, 1);
ALLOC_STATE_IDX( tex[2], tex2_mm, TEX_STATE_SIZE, "TEX/tex-2", 0, 2);
ALLOC_STATE( mat[0], tcl_add4, MAT_STATE_SIZE, "MAT/modelproject", 1 );
ALLOC_STATE( mat[1], tcl_eyespace_or_fog_add4, MAT_STATE_SIZE, "MAT/modelview", 1 );
ALLOC_STATE( mat[2], tcl_eyespace_or_lighting_add4, MAT_STATE_SIZE, "MAT/it-modelview", 1 );
ALLOC_STATE( mat[3], tcl_tex0_add4, MAT_STATE_SIZE, "MAT/texmat0", 1 );
ALLOC_STATE( mat[4], tcl_tex1_add4, MAT_STATE_SIZE, "MAT/texmat1", 1 );
ALLOC_STATE( mat[5], tcl_tex2_add4, MAT_STATE_SIZE, "MAT/texmat2", 1 );
ALLOC_STATE( lit[0], tcl_lit0_add6, LIT_STATE_SIZE, "LIT/light-0", 1 );
ALLOC_STATE( lit[1], tcl_lit1_add6, LIT_STATE_SIZE, "LIT/light-1", 1 );
ALLOC_STATE( lit[2], tcl_lit2_add6, LIT_STATE_SIZE, "LIT/light-2", 1 );
ALLOC_STATE( lit[3], tcl_lit3_add6, LIT_STATE_SIZE, "LIT/light-3", 1 );
ALLOC_STATE( lit[4], tcl_lit4_add6, LIT_STATE_SIZE, "LIT/light-4", 1 );
ALLOC_STATE( lit[5], tcl_lit5_add6, LIT_STATE_SIZE, "LIT/light-5", 1 );
ALLOC_STATE( lit[6], tcl_lit6_add6, LIT_STATE_SIZE, "LIT/light-6", 1 );
ALLOC_STATE( lit[7], tcl_lit7_add6, LIT_STATE_SIZE, "LIT/light-7", 1 );
ALLOC_STATE( ucp[0], tcl_ucp0_add4, UCP_STATE_SIZE, "UCP/userclip-0", 1 );
ALLOC_STATE( ucp[1], tcl_ucp1_add4, UCP_STATE_SIZE, "UCP/userclip-1", 1 );
ALLOC_STATE( ucp[2], tcl_ucp2_add4, UCP_STATE_SIZE, "UCP/userclip-2", 1 );
ALLOC_STATE( ucp[3], tcl_ucp3_add4, UCP_STATE_SIZE, "UCP/userclip-3", 1 );
ALLOC_STATE( ucp[4], tcl_ucp4_add4, UCP_STATE_SIZE, "UCP/userclip-4", 1 );
ALLOC_STATE( ucp[5], tcl_ucp5_add4, UCP_STATE_SIZE, "UCP/userclip-5", 1 );
ALLOC_STATE( stp, always, STP_STATE_SIZE, "STP/stp", 0 );
for (i = 0; i < 3; i++) {
rmesa->hw.tex[i].emit = tex_emit_cs;
}
ALLOC_STATE_IDX( cube[0], cube0_mm, CUBE_STATE_SIZE, "CUBE/cube-0", 0, 0 );
ALLOC_STATE_IDX( cube[1], cube1_mm, CUBE_STATE_SIZE, "CUBE/cube-1", 0, 1 );
ALLOC_STATE_IDX( cube[2], cube2_mm, CUBE_STATE_SIZE, "CUBE/cube-2", 0, 2 );
for (i = 0; i < 3; i++)
rmesa->hw.cube[i].emit = cube_emit_cs;
ALLOC_STATE_IDX( txr[0], txr0, TXR_STATE_SIZE, "TXR/txr-0", 0, 0 );
ALLOC_STATE_IDX( txr[1], txr1, TXR_STATE_SIZE, "TXR/txr-1", 0, 1 );
ALLOC_STATE_IDX( txr[2], txr2, TXR_STATE_SIZE, "TXR/txr-2", 0, 2 );
radeonSetUpAtomList( rmesa );
/* Fill in the packet headers:
*/
rmesa->hw.ctx.cmd[CTX_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_PP_MISC);
rmesa->hw.ctx.cmd[CTX_CMD_1] = cmdpkt(rmesa, RADEON_EMIT_PP_CNTL);
rmesa->hw.ctx.cmd[CTX_CMD_2] = cmdpkt(rmesa, RADEON_EMIT_RB3D_COLORPITCH);
rmesa->hw.lin.cmd[LIN_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_RE_LINE_PATTERN);
rmesa->hw.lin.cmd[LIN_CMD_1] = cmdpkt(rmesa, RADEON_EMIT_SE_LINE_WIDTH);
rmesa->hw.msk.cmd[MSK_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_RB3D_STENCILREFMASK);
rmesa->hw.vpt.cmd[VPT_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_SE_VPORT_XSCALE);
rmesa->hw.set.cmd[SET_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_SE_CNTL);
rmesa->hw.set.cmd[SET_CMD_1] = cmdpkt(rmesa, RADEON_EMIT_SE_CNTL_STATUS);
rmesa->hw.msc.cmd[MSC_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_RE_MISC);
rmesa->hw.tex[0].cmd[TEX_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_PP_TXFILTER_0);
rmesa->hw.tex[0].cmd[TEX_CMD_1] = cmdpkt(rmesa, RADEON_EMIT_PP_BORDER_COLOR_0);
rmesa->hw.tex[1].cmd[TEX_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_PP_TXFILTER_1);
rmesa->hw.tex[1].cmd[TEX_CMD_1] = cmdpkt(rmesa, RADEON_EMIT_PP_BORDER_COLOR_1);
rmesa->hw.tex[2].cmd[TEX_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_PP_TXFILTER_2);
rmesa->hw.tex[2].cmd[TEX_CMD_1] = cmdpkt(rmesa, RADEON_EMIT_PP_BORDER_COLOR_2);
rmesa->hw.cube[0].cmd[CUBE_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_PP_CUBIC_FACES_0);
rmesa->hw.cube[0].cmd[CUBE_CMD_1] = cmdpkt(rmesa, RADEON_EMIT_PP_CUBIC_OFFSETS_T0);
rmesa->hw.cube[1].cmd[CUBE_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_PP_CUBIC_FACES_1);
rmesa->hw.cube[1].cmd[CUBE_CMD_1] = cmdpkt(rmesa, RADEON_EMIT_PP_CUBIC_OFFSETS_T1);
rmesa->hw.cube[2].cmd[CUBE_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_PP_CUBIC_FACES_2);
rmesa->hw.cube[2].cmd[CUBE_CMD_1] = cmdpkt(rmesa, RADEON_EMIT_PP_CUBIC_OFFSETS_T2);
rmesa->hw.zbs.cmd[ZBS_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_SE_ZBIAS_FACTOR);
rmesa->hw.tcl.cmd[TCL_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT);
rmesa->hw.mtl.cmd[MTL_CMD_0] =
cmdpkt(rmesa, RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED);
rmesa->hw.txr[0].cmd[TXR_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_PP_TEX_SIZE_0);
rmesa->hw.txr[1].cmd[TXR_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_PP_TEX_SIZE_1);
rmesa->hw.txr[2].cmd[TXR_CMD_0] = cmdpkt(rmesa, RADEON_EMIT_PP_TEX_SIZE_2);
rmesa->hw.grd.cmd[GRD_CMD_0] =
cmdscl( RADEON_SS_VERT_GUARD_CLIP_ADJ_ADDR, 1, 4 );
rmesa->hw.fog.cmd[FOG_CMD_0] =
cmdvec( RADEON_VS_FOG_PARAM_ADDR, 1, 4 );
rmesa->hw.glt.cmd[GLT_CMD_0] =
cmdvec( RADEON_VS_GLOBAL_AMBIENT_ADDR, 1, 4 );
rmesa->hw.eye.cmd[EYE_CMD_0] =
cmdvec( RADEON_VS_EYE_VECTOR_ADDR, 1, 4 );
for (i = 0 ; i < 6; i++) {
rmesa->hw.mat[i].cmd[MAT_CMD_0] =
cmdvec( RADEON_VS_MATRIX_0_ADDR + i*4, 1, 16);
}
for (i = 0 ; i < 8; i++) {
rmesa->hw.lit[i].cmd[LIT_CMD_0] =
cmdvec( RADEON_VS_LIGHT_AMBIENT_ADDR + i, 8, 24 );
rmesa->hw.lit[i].cmd[LIT_CMD_1] =
cmdscl( RADEON_SS_LIGHT_DCD_ADDR + i, 8, 6 );
}
for (i = 0 ; i < 6; i++) {
rmesa->hw.ucp[i].cmd[UCP_CMD_0] =
cmdvec( RADEON_VS_UCP_ADDR + i, 1, 4 );
}
rmesa->hw.stp.cmd[STP_CMD_0] = CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0);
rmesa->hw.stp.cmd[STP_DATA_0] = 0;
rmesa->hw.stp.cmd[STP_CMD_1] = CP_PACKET0_ONE(RADEON_RE_STIPPLE_DATA, 31);
rmesa->hw.grd.emit = scl_emit;
rmesa->hw.fog.emit = vec_emit;
rmesa->hw.glt.emit = vec_emit;
rmesa->hw.eye.emit = vec_emit;
for (i = 0; i < 6; i++)
rmesa->hw.mat[i].emit = vec_emit;
for (i = 0; i < 8; i++)
rmesa->hw.lit[i].emit = lit_emit;
for (i = 0; i < 6; i++)
rmesa->hw.ucp[i].emit = vec_emit;
/* Initial hardware state:
 */
rmesa->hw.ctx.cmd[CTX_PP_MISC] = (RADEON_ALPHA_TEST_PASS |
RADEON_CHROMA_FUNC_FAIL |
RADEON_CHROMA_KEY_NEAREST |
RADEON_SHADOW_FUNC_EQUAL |
RADEON_SHADOW_PASS_1 /*|
RADEON_RIGHT_HAND_CUBE_OGL */);
rmesa->hw.ctx.cmd[CTX_PP_FOG_COLOR] = (RADEON_FOG_VERTEX |
/* this bit unused for vertex fog */
RADEON_FOG_USE_DEPTH);
rmesa->hw.ctx.cmd[CTX_RE_SOLID_COLOR] = 0x00000000;
rmesa->hw.ctx.cmd[CTX_RB3D_BLENDCNTL] = (RADEON_COMB_FCN_ADD_CLAMP |
RADEON_SRC_BLEND_GL_ONE |
RADEON_DST_BLEND_GL_ZERO );
rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] = (RADEON_Z_TEST_LESS |
RADEON_STENCIL_TEST_ALWAYS |
RADEON_STENCIL_FAIL_KEEP |
RADEON_STENCIL_ZPASS_KEEP |
RADEON_STENCIL_ZFAIL_KEEP |
RADEON_Z_WRITE_ENABLE);
if (rmesa->using_hyperz) {
rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_Z_COMPRESSION_ENABLE |
RADEON_Z_DECOMPRESSION_ENABLE;
if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
/* works for q3, but slight rendering errors with glxgears ? */
/* rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_Z_HIERARCHY_ENABLE;*/
/* need this otherwise get lots of lockups with q3 ??? */
rmesa->hw.ctx.cmd[CTX_RB3D_ZSTENCILCNTL] |= RADEON_FORCE_Z_DIRTY;
}
}
rmesa->hw.ctx.cmd[CTX_PP_CNTL] = (RADEON_SCISSOR_ENABLE |
RADEON_ANTI_ALIAS_NONE);
rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] = (RADEON_PLANE_MASK_ENABLE |
RADEON_ZBLOCK16);
switch ( driQueryOptioni( &rmesa->radeon.optionCache, "dither_mode" ) ) {
case DRI_CONF_DITHER_XERRORDIFFRESET:
rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= RADEON_DITHER_INIT;
break;
case DRI_CONF_DITHER_ORDERED:
rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= RADEON_SCALE_DITHER_ENABLE;
break;
}
if ( driQueryOptioni( &rmesa->radeon.optionCache, "round_mode" ) ==
DRI_CONF_ROUND_ROUND )
rmesa->radeon.state.color.roundEnable = RADEON_ROUND_ENABLE;
else
rmesa->radeon.state.color.roundEnable = 0;
if ( driQueryOptioni (&rmesa->radeon.optionCache, "color_reduction" ) ==
DRI_CONF_COLOR_REDUCTION_DITHER )
rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= RADEON_DITHER_ENABLE;
else
rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= rmesa->radeon.state.color.roundEnable;
rmesa->hw.set.cmd[SET_SE_CNTL] = (RADEON_FFACE_CULL_CCW |
RADEON_BFACE_SOLID |
RADEON_FFACE_SOLID |
/* RADEON_BADVTX_CULL_DISABLE | */
RADEON_FLAT_SHADE_VTX_LAST |
RADEON_DIFFUSE_SHADE_GOURAUD |
RADEON_ALPHA_SHADE_GOURAUD |
RADEON_SPECULAR_SHADE_GOURAUD |
RADEON_FOG_SHADE_GOURAUD |
RADEON_VPORT_XY_XFORM_ENABLE |
RADEON_VPORT_Z_XFORM_ENABLE |
RADEON_VTX_PIX_CENTER_OGL |
RADEON_ROUND_MODE_TRUNC |
RADEON_ROUND_PREC_8TH_PIX);
rmesa->hw.set.cmd[SET_SE_CNTL_STATUS] =
#ifdef MESA_BIG_ENDIAN
RADEON_VC_32BIT_SWAP;
#else
RADEON_VC_NO_SWAP;
#endif
if (!(rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL)) {
rmesa->hw.set.cmd[SET_SE_CNTL_STATUS] |= RADEON_TCL_BYPASS;
}
rmesa->hw.set.cmd[SET_SE_COORDFMT] = (
RADEON_VTX_W0_IS_NOT_1_OVER_W0 |
RADEON_TEX1_W_ROUTING_USE_Q1);
rmesa->hw.lin.cmd[LIN_RE_LINE_PATTERN] = ((1 << 16) | 0xffff);
rmesa->hw.lin.cmd[LIN_RE_LINE_STATE] =
((0 << RADEON_LINE_CURRENT_PTR_SHIFT) |
(1 << RADEON_LINE_CURRENT_COUNT_SHIFT));
rmesa->hw.lin.cmd[LIN_SE_LINE_WIDTH] = (1 << 4);
rmesa->hw.msk.cmd[MSK_RB3D_STENCILREFMASK] =
((0x00 << RADEON_STENCIL_REF_SHIFT) |
(0xff << RADEON_STENCIL_MASK_SHIFT) |
(0xff << RADEON_STENCIL_WRITEMASK_SHIFT));
rmesa->hw.msk.cmd[MSK_RB3D_ROPCNTL] = RADEON_ROP_COPY;
rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK] = 0xffffffff;
rmesa->hw.msc.cmd[MSC_RE_MISC] =
((0 << RADEON_STIPPLE_X_OFFSET_SHIFT) |
(0 << RADEON_STIPPLE_Y_OFFSET_SHIFT) |
RADEON_STIPPLE_BIG_BIT_ORDER);
rmesa->hw.vpt.cmd[VPT_SE_VPORT_XSCALE] = 0x00000000;
rmesa->hw.vpt.cmd[VPT_SE_VPORT_XOFFSET] = 0x00000000;
rmesa->hw.vpt.cmd[VPT_SE_VPORT_YSCALE] = 0x00000000;
rmesa->hw.vpt.cmd[VPT_SE_VPORT_YOFFSET] = 0x00000000;
rmesa->hw.vpt.cmd[VPT_SE_VPORT_ZSCALE] = 0x00000000;
rmesa->hw.vpt.cmd[VPT_SE_VPORT_ZOFFSET] = 0x00000000;
for ( i = 0 ; i < ctx->Const.MaxTextureUnits ; i++ ) {
rmesa->hw.tex[i].cmd[TEX_PP_TXFILTER] = RADEON_BORDER_MODE_OGL;
rmesa->hw.tex[i].cmd[TEX_PP_TXFORMAT] =
(RADEON_TXFORMAT_ENDIAN_NO_SWAP |
RADEON_TXFORMAT_PERSPECTIVE_ENABLE |
(i << 24) | /* This is one of RADEON_TXFORMAT_ST_ROUTE_STQ[012] */
(2 << RADEON_TXFORMAT_WIDTH_SHIFT) |
(2 << RADEON_TXFORMAT_HEIGHT_SHIFT));
/* Initialize the texture offset to the start of the card texture heap */
// rmesa->hw.tex[i].cmd[TEX_PP_TXOFFSET] =
// rmesa->radeon.radeonScreen->texOffset[RADEON_LOCAL_TEX_HEAP];
rmesa->hw.tex[i].cmd[TEX_PP_BORDER_COLOR] = 0;
rmesa->hw.tex[i].cmd[TEX_PP_TXCBLEND] =
(RADEON_COLOR_ARG_A_ZERO |
RADEON_COLOR_ARG_B_ZERO |
RADEON_COLOR_ARG_C_CURRENT_COLOR |
RADEON_BLEND_CTL_ADD |
RADEON_SCALE_1X |
RADEON_CLAMP_TX);
rmesa->hw.tex[i].cmd[TEX_PP_TXABLEND] =
(RADEON_ALPHA_ARG_A_ZERO |
RADEON_ALPHA_ARG_B_ZERO |
RADEON_ALPHA_ARG_C_CURRENT_ALPHA |
RADEON_BLEND_CTL_ADD |
RADEON_SCALE_1X |
RADEON_CLAMP_TX);
rmesa->hw.tex[i].cmd[TEX_PP_TFACTOR] = 0;
rmesa->hw.cube[i].cmd[CUBE_PP_CUBIC_FACES] = 0;
rmesa->hw.cube[i].cmd[CUBE_PP_CUBIC_OFFSET_0] =
rmesa->radeon.radeonScreen->texOffset[RADEON_LOCAL_TEX_HEAP];
rmesa->hw.cube[i].cmd[CUBE_PP_CUBIC_OFFSET_1] =
rmesa->radeon.radeonScreen->texOffset[RADEON_LOCAL_TEX_HEAP];
rmesa->hw.cube[i].cmd[CUBE_PP_CUBIC_OFFSET_2] =
rmesa->radeon.radeonScreen->texOffset[RADEON_LOCAL_TEX_HEAP];
rmesa->hw.cube[i].cmd[CUBE_PP_CUBIC_OFFSET_3] =
rmesa->radeon.radeonScreen->texOffset[RADEON_LOCAL_TEX_HEAP];
rmesa->hw.cube[i].cmd[CUBE_PP_CUBIC_OFFSET_4] =
rmesa->radeon.radeonScreen->texOffset[RADEON_LOCAL_TEX_HEAP];
}
/* ST1 can only be added while multitexturing is actually in use, but it
 * can be kept afterwards.  Errors occur if DIFFUSE is missing.
 */
rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] =
(RADEON_TCL_VTX_Z0 |
RADEON_TCL_VTX_W0 |
RADEON_TCL_VTX_PK_DIFFUSE
); /* need to keep this up to date */
rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXSEL] =
( RADEON_TCL_COMPUTE_XYZW |
(RADEON_TCL_TEX_INPUT_TEX_0 << RADEON_TCL_TEX_0_OUTPUT_SHIFT) |
(RADEON_TCL_TEX_INPUT_TEX_1 << RADEON_TCL_TEX_1_OUTPUT_SHIFT) |
(RADEON_TCL_TEX_INPUT_TEX_2 << RADEON_TCL_TEX_2_OUTPUT_SHIFT));
/* XXX */
rmesa->hw.tcl.cmd[TCL_MATRIX_SELECT_0] =
((MODEL << RADEON_MODELVIEW_0_SHIFT) |
(MODEL_IT << RADEON_IT_MODELVIEW_0_SHIFT));
rmesa->hw.tcl.cmd[TCL_MATRIX_SELECT_1] =
((MODEL_PROJ << RADEON_MODELPROJECT_0_SHIFT) |
(TEXMAT_0 << RADEON_TEXMAT_0_SHIFT) |
(TEXMAT_1 << RADEON_TEXMAT_1_SHIFT) |
(TEXMAT_2 << RADEON_TEXMAT_2_SHIFT));
rmesa->hw.tcl.cmd[TCL_UCP_VERT_BLEND_CTL] =
(RADEON_UCP_IN_CLIP_SPACE |
RADEON_CULL_FRONT_IS_CCW);
rmesa->hw.tcl.cmd[TCL_TEXTURE_PROC_CTL] = 0;
rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] =
(RADEON_SPECULAR_LIGHTS |
RADEON_DIFFUSE_SPECULAR_COMBINE |
RADEON_LOCAL_LIGHT_VEC_GL |
(RADEON_LM_SOURCE_STATE_MULT << RADEON_EMISSIVE_SOURCE_SHIFT) |
(RADEON_LM_SOURCE_STATE_MULT << RADEON_AMBIENT_SOURCE_SHIFT) |
(RADEON_LM_SOURCE_STATE_MULT << RADEON_DIFFUSE_SOURCE_SHIFT) |
(RADEON_LM_SOURCE_STATE_MULT << RADEON_SPECULAR_SOURCE_SHIFT));
for (i = 0 ; i < 8; i++) {
struct gl_light_uniforms *lu = &ctx->Light.LightSource[i];
GLenum p = GL_LIGHT0 + i;
*(float *)&(rmesa->hw.lit[i].cmd[LIT_RANGE_CUTOFF]) = FLT_MAX;
ctx->Driver.Lightfv( ctx, p, GL_AMBIENT, lu->Ambient );
ctx->Driver.Lightfv( ctx, p, GL_DIFFUSE, lu->Diffuse );
ctx->Driver.Lightfv( ctx, p, GL_SPECULAR, lu->Specular );
ctx->Driver.Lightfv( ctx, p, GL_POSITION, NULL );
ctx->Driver.Lightfv( ctx, p, GL_SPOT_DIRECTION, NULL );
ctx->Driver.Lightfv( ctx, p, GL_SPOT_EXPONENT, &lu->SpotExponent );
ctx->Driver.Lightfv( ctx, p, GL_SPOT_CUTOFF, &lu->SpotCutoff );
ctx->Driver.Lightfv( ctx, p, GL_CONSTANT_ATTENUATION,
&lu->ConstantAttenuation );
ctx->Driver.Lightfv( ctx, p, GL_LINEAR_ATTENUATION,
&lu->LinearAttenuation );
ctx->Driver.Lightfv( ctx, p, GL_QUADRATIC_ATTENUATION,
&lu->QuadraticAttenuation );
*(float *)&(rmesa->hw.lit[i].cmd[LIT_ATTEN_XXX]) = 0.0;
}
ctx->Driver.LightModelfv( ctx, GL_LIGHT_MODEL_AMBIENT,
ctx->Light.Model.Ambient );
TNL_CONTEXT(ctx)->Driver.NotifyMaterialChange( ctx );
for (i = 0 ; i < 6; i++) {
ctx->Driver.ClipPlane( ctx, GL_CLIP_PLANE0 + i, NULL );
}
ctx->Driver.Fogfv( ctx, GL_FOG_MODE, NULL );
ctx->Driver.Fogfv( ctx, GL_FOG_DENSITY, &ctx->Fog.Density );
ctx->Driver.Fogfv( ctx, GL_FOG_START, &ctx->Fog.Start );
ctx->Driver.Fogfv( ctx, GL_FOG_END, &ctx->Fog.End );
ctx->Driver.Fogfv( ctx, GL_FOG_COLOR, ctx->Fog.Color );
ctx->Driver.Fogfv( ctx, GL_FOG_COORDINATE_SOURCE_EXT, NULL );
rmesa->hw.grd.cmd[GRD_VERT_GUARD_CLIP_ADJ] = IEEE_ONE;
rmesa->hw.grd.cmd[GRD_VERT_GUARD_DISCARD_ADJ] = IEEE_ONE;
rmesa->hw.grd.cmd[GRD_HORZ_GUARD_CLIP_ADJ] = IEEE_ONE;
rmesa->hw.grd.cmd[GRD_HORZ_GUARD_DISCARD_ADJ] = IEEE_ONE;
rmesa->hw.eye.cmd[EYE_X] = 0;
rmesa->hw.eye.cmd[EYE_Y] = 0;
rmesa->hw.eye.cmd[EYE_Z] = IEEE_ONE;
rmesa->hw.eye.cmd[EYE_RESCALE_FACTOR] = IEEE_ONE;
radeon_init_query_stateobj(&rmesa->radeon, R100_QUERYOBJ_CMDSIZE);
rmesa->radeon.query.queryobj.cmd[R100_QUERYOBJ_CMD_0] = CP_PACKET0(RADEON_RB3D_ZPASS_DATA, 0);
rmesa->radeon.query.queryobj.cmd[R100_QUERYOBJ_DATA_0] = 0;
rmesa->radeon.hw.all_dirty = GL_TRUE;
rcommonInitCmdBuf(&rmesa->radeon);
}

View File

@ -1,880 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keithw@vmware.com>
*/
#include "main/glheader.h"
#include "main/mtypes.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/state.h"
#include "math/m_xform.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_context.h"
#include "tnl/t_pipeline.h"
#include "radeon_context.h"
#include "radeon_ioctl.h"
#include "radeon_state.h"
#include "radeon_swtcl.h"
#include "radeon_tcl.h"
#include "radeon_debug.h"
/* R100: xyzw, c0, c1/fog, stq[0..2] = 4+1+1+3*3 = 15 right? */
/* R200: xyzw, c0, c1/fog, strq[0..5] = 4+1+1+4*6 = 30 */
#define RADEON_MAX_TNL_VERTEX_SIZE (15 * sizeof(GLfloat)) /* for mesa _tnl stage */
/***********************************************************************
* Initialization
***********************************************************************/
#define EMIT_ATTR( ATTR, STYLE, F0 ) \
do { \
rmesa->radeon.swtcl.vertex_attrs[rmesa->radeon.swtcl.vertex_attr_count].attrib = (ATTR); \
rmesa->radeon.swtcl.vertex_attrs[rmesa->radeon.swtcl.vertex_attr_count].format = (STYLE); \
rmesa->radeon.swtcl.vertex_attr_count++; \
fmt_0 |= F0; \
} while (0)
#define EMIT_PAD( N ) \
do { \
rmesa->radeon.swtcl.vertex_attrs[rmesa->radeon.swtcl.vertex_attr_count].attrib = 0; \
rmesa->radeon.swtcl.vertex_attrs[rmesa->radeon.swtcl.vertex_attr_count].format = EMIT_PAD; \
rmesa->radeon.swtcl.vertex_attrs[rmesa->radeon.swtcl.vertex_attr_count].offset = (N); \
rmesa->radeon.swtcl.vertex_attr_count++; \
} while (0)
static GLuint radeon_cp_vc_frmts[3][2] =
{
{ RADEON_CP_VC_FRMT_ST0, RADEON_CP_VC_FRMT_ST0 | RADEON_CP_VC_FRMT_Q0 },
{ RADEON_CP_VC_FRMT_ST1, RADEON_CP_VC_FRMT_ST1 | RADEON_CP_VC_FRMT_Q1 },
{ RADEON_CP_VC_FRMT_ST2, RADEON_CP_VC_FRMT_ST2 | RADEON_CP_VC_FRMT_Q2 },
};
static void radeonSetVertexFormat( struct gl_context *ctx )
{
r100ContextPtr rmesa = R100_CONTEXT( ctx );
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
GLbitfield64 index_bitset = tnl->render_inputs_bitset;
int fmt_0 = 0;
int offset = 0;
/* Important: use the NDC coordinates when the projection divide has
 * already been done, otherwise fall back to the clip coordinates:
 */
if ( VB->NdcPtr != NULL ) {
VB->AttribPtr[VERT_ATTRIB_POS] = VB->NdcPtr;
}
else {
VB->AttribPtr[VERT_ATTRIB_POS] = VB->ClipPtr;
}
assert( VB->AttribPtr[VERT_ATTRIB_POS] != NULL );
rmesa->radeon.swtcl.vertex_attr_count = 0;
/* EMIT_ATTR's must be in order as they tell t_vertex.c how to
* build up a hardware vertex.
*/
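/* Example resulting layout (illustrative): with a projective texture on
 * unit 0, the vertex built below is
 *
 *    float x, y, z, w;      // _TNL_ATTRIB_POS,    EMIT_4F
 *    GLubyte r, g, b, a;    // _TNL_ATTRIB_COLOR0, EMIT_4UB_4F_RGBA
 *    float s0, t0, q0;      // _TNL_ATTRIB_TEX0,   EMIT_3F
 *
 * while fmt_0 accumulates the matching RADEON_CP_VC_FRMT_* bits for the
 * hardware vertex fetcher.
 */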
if ( !rmesa->swtcl.needproj ||
(index_bitset & BITFIELD64_RANGE(_TNL_ATTRIB_TEX0, _TNL_NUM_TEX))) {
/* for projtex */
EMIT_ATTR( _TNL_ATTRIB_POS, EMIT_4F,
RADEON_CP_VC_FRMT_XY | RADEON_CP_VC_FRMT_Z | RADEON_CP_VC_FRMT_W0 );
offset = 4;
}
else {
EMIT_ATTR( _TNL_ATTRIB_POS, EMIT_3F,
RADEON_CP_VC_FRMT_XY | RADEON_CP_VC_FRMT_Z );
offset = 3;
}
rmesa->swtcl.coloroffset = offset;
#if MESA_LITTLE_ENDIAN
EMIT_ATTR( _TNL_ATTRIB_COLOR0, EMIT_4UB_4F_RGBA,
RADEON_CP_VC_FRMT_PKCOLOR );
#else
EMIT_ATTR( _TNL_ATTRIB_COLOR0, EMIT_4UB_4F_ABGR,
RADEON_CP_VC_FRMT_PKCOLOR );
#endif
offset += 1;
rmesa->swtcl.specoffset = 0;
if (index_bitset &
(BITFIELD64_BIT(_TNL_ATTRIB_COLOR1) | BITFIELD64_BIT(_TNL_ATTRIB_FOG))) {
#if MESA_LITTLE_ENDIAN
if (index_bitset & BITFIELD64_BIT(_TNL_ATTRIB_COLOR1)) {
rmesa->swtcl.specoffset = offset;
EMIT_ATTR( _TNL_ATTRIB_COLOR1, EMIT_3UB_3F_RGB,
RADEON_CP_VC_FRMT_PKSPEC );
}
else {
EMIT_PAD( 3 );
}
if (index_bitset & BITFIELD64_BIT(_TNL_ATTRIB_FOG)) {
EMIT_ATTR( _TNL_ATTRIB_FOG, EMIT_1UB_1F,
RADEON_CP_VC_FRMT_PKSPEC );
}
else {
EMIT_PAD( 1 );
}
#else
if (index_bitset & BITFIELD64_BIT(_TNL_ATTRIB_FOG)) {
EMIT_ATTR( _TNL_ATTRIB_FOG, EMIT_1UB_1F,
RADEON_CP_VC_FRMT_PKSPEC );
}
else {
EMIT_PAD( 1 );
}
if (index_bitset & BITFIELD64_BIT(_TNL_ATTRIB_COLOR1)) {
rmesa->swtcl.specoffset = offset;
EMIT_ATTR( _TNL_ATTRIB_COLOR1, EMIT_3UB_3F_BGR,
RADEON_CP_VC_FRMT_PKSPEC );
}
else {
EMIT_PAD( 3 );
}
#endif
}
if (index_bitset & BITFIELD64_RANGE(_TNL_ATTRIB_TEX0, _TNL_NUM_TEX)) {
int i;
for (i = 0; i < ctx->Const.MaxTextureUnits; i++) {
if (index_bitset & BITFIELD64_BIT(_TNL_ATTRIB_TEX(i))) {
GLuint sz = VB->AttribPtr[_TNL_ATTRIB_TEX0 + i]->size;
switch (sz) {
case 1:
case 2:
EMIT_ATTR( _TNL_ATTRIB_TEX0+i, EMIT_2F,
radeon_cp_vc_frmts[i][0] );
break;
case 3:
if (ctx->Texture.Unit[i]._Current &&
ctx->Texture.Unit[i]._Current->Target == GL_TEXTURE_CUBE_MAP) {
EMIT_ATTR( _TNL_ATTRIB_TEX0+i, EMIT_3F,
radeon_cp_vc_frmts[i][1] );
} else {
EMIT_ATTR( _TNL_ATTRIB_TEX0+i, EMIT_2F,
radeon_cp_vc_frmts[i][0] );
}
break;
case 4:
if (ctx->Texture.Unit[i]._Current &&
ctx->Texture.Unit[i]._Current->Target == GL_TEXTURE_CUBE_MAP) {
EMIT_ATTR( _TNL_ATTRIB_TEX0+i, EMIT_3F,
radeon_cp_vc_frmts[i][1] );
} else {
EMIT_ATTR( _TNL_ATTRIB_TEX0+i, EMIT_3F_XYW,
radeon_cp_vc_frmts[i][1] );
}
break;
default:
continue;
}
}
}
}
if (rmesa->radeon.tnl_index_bitset != index_bitset ||
fmt_0 != rmesa->swtcl.vertex_format) {
RADEON_NEWPRIM(rmesa);
rmesa->swtcl.vertex_format = fmt_0;
rmesa->radeon.swtcl.vertex_size =
_tnl_install_attrs( ctx,
rmesa->radeon.swtcl.vertex_attrs,
rmesa->radeon.swtcl.vertex_attr_count,
NULL, 0 );
rmesa->radeon.swtcl.vertex_size /= 4;
rmesa->radeon.tnl_index_bitset = index_bitset;
radeon_print(RADEON_SWRENDER, RADEON_VERBOSE,
"%s: vertex_size= %d floats\n", __func__, rmesa->radeon.swtcl.vertex_size);
}
}
static void radeon_predict_emit_size( r100ContextPtr rmesa )
{
if (!rmesa->radeon.swtcl.emit_prediction) {
const int state_size = radeonCountStateEmitSize( &rmesa->radeon );
const int scissor_size = 8;
const int prims_size = 8;
const int vertex_size = 7;
if (rcommonEnsureCmdBufSpace(&rmesa->radeon,
state_size +
(scissor_size + prims_size + vertex_size),
__func__))
rmesa->radeon.swtcl.emit_prediction = radeonCountStateEmitSize( &rmesa->radeon );
else
rmesa->radeon.swtcl.emit_prediction = state_size;
rmesa->radeon.swtcl.emit_prediction += scissor_size + prims_size + vertex_size
+ rmesa->radeon.cmdbuf.cs->cdw;
}
}
static void radeonRenderStart( struct gl_context *ctx )
{
r100ContextPtr rmesa = R100_CONTEXT( ctx );
radeonSetVertexFormat( ctx );
if (rmesa->radeon.dma.flush != 0 &&
rmesa->radeon.dma.flush != rcommon_flush_last_swtcl_prim)
rmesa->radeon.dma.flush( ctx );
}
/**
 * Set vertex state for SW TCL.  The primary purpose of this function is
 * to determine in advance whether the hardware can / should do the
 * projection divide, or whether Mesa should do it.
 */
void radeonChooseVertexState( struct gl_context *ctx )
{
r100ContextPtr rmesa = R100_CONTEXT( ctx );
TNLcontext *tnl = TNL_CONTEXT(ctx);
GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];
GLboolean unfilled = (ctx->Polygon.FrontMode != GL_FILL ||
ctx->Polygon.BackMode != GL_FILL);
GLboolean twosided = ctx->Light.Enabled && ctx->Light.Model.TwoSide;
se_coord_fmt &= ~(RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
RADEON_VTX_W0_IS_NOT_1_OVER_W0);
/* We must ensure that we don't do _tnl_need_projected_coords while in a
* rasterization fallback. As this function will be called again when we
* leave a rasterization fallback, we can just skip it for now.
*/
if (rmesa->radeon.Fallback != 0)
return;
/* HW perspective divide is a win, but tiny vertex formats are a
* bigger one.
*/
if ((0 == (tnl->render_inputs_bitset &
(BITFIELD64_RANGE(_TNL_ATTRIB_TEX0, _TNL_NUM_TEX)
| BITFIELD64_BIT(_TNL_ATTRIB_COLOR1))))
|| twosided
|| unfilled) {
rmesa->swtcl.needproj = GL_TRUE;
se_coord_fmt |= (RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
RADEON_VTX_Z_PRE_MULT_1_OVER_W0);
}
else {
rmesa->swtcl.needproj = GL_FALSE;
se_coord_fmt |= (RADEON_VTX_W0_IS_NOT_1_OVER_W0);
}
_tnl_need_projected_coords( ctx, rmesa->swtcl.needproj );
if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
RADEON_STATECHANGE( rmesa, set );
rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
}
}
void r100_swtcl_flush(struct gl_context *ctx, uint32_t current_offset)
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
radeonEmitState(&rmesa->radeon);
radeonEmitVertexAOS( rmesa,
rmesa->radeon.swtcl.vertex_size,
rmesa->radeon.swtcl.bo,
current_offset);
radeonEmitVbufPrim( rmesa,
rmesa->swtcl.vertex_format,
rmesa->radeon.swtcl.hw_primitive,
rmesa->radeon.swtcl.numverts);
if ( rmesa->radeon.swtcl.emit_prediction < rmesa->radeon.cmdbuf.cs->cdw )
WARN_ONCE("Rendering was %d commands larger than predicted size."
" We might overflow command buffer.\n",
rmesa->radeon.cmdbuf.cs->cdw - rmesa->radeon.swtcl.emit_prediction );
rmesa->radeon.swtcl.emit_prediction = 0;
}
/*
* Render unclipped vertex buffers by emitting vertices directly to
* dma buffers. Use strip/fan hardware primitives where possible.
* Try to simulate missing primitives with indexed vertices.
*/
#define HAVE_POINTS 1
#define HAVE_LINES 1
#define HAVE_LINE_STRIPS 1
#define HAVE_TRIANGLES 1
#define HAVE_TRI_STRIPS 1
#define HAVE_TRI_FANS 1
#define HAVE_POLYGONS 0
/* \todo: is it possible to make "ELTS" work with t_vertex code ? */
#define HAVE_ELTS 0
static const GLuint hw_prim[GL_POLYGON+1] = {
[GL_POINTS] = RADEON_CP_VC_CNTL_PRIM_TYPE_POINT,
[GL_LINES] = RADEON_CP_VC_CNTL_PRIM_TYPE_LINE,
[GL_LINE_LOOP] = 0,
[GL_LINE_STRIP] = RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP,
[GL_TRIANGLES] = RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
[GL_TRIANGLE_STRIP] = RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP,
[GL_TRIANGLE_FAN] = RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN,
[GL_QUADS] = 0,
[GL_QUAD_STRIP] = 0,
[GL_POLYGON] = 0
};
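/* Entries left 0 above (LINE_LOOP, QUADS, QUAD_STRIP, POLYGON) have no
 * matching CP primitive; the t_dd_dmatmp.h template included below
 * lowers them onto the supported point/line/strip/fan/list types.
 */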
static inline void
radeonDmaPrimitive( r100ContextPtr rmesa, GLenum prim )
{
RADEON_NEWPRIM( rmesa );
rmesa->radeon.swtcl.hw_primitive = hw_prim[prim];
// assert(rmesa->radeon.dma.current.ptr == rmesa->radeon.dma.current.start);
}
static void* radeon_alloc_verts( r100ContextPtr rmesa , GLuint nr, GLuint size )
{
void *rv;
do {
radeon_predict_emit_size( rmesa );
rv = rcommonAllocDmaLowVerts( &rmesa->radeon, nr, size );
} while (!rv);
return rv;
}
#define LOCAL_VARS r100ContextPtr rmesa = R100_CONTEXT(ctx)
#define INIT( prim ) radeonDmaPrimitive( rmesa, prim )
#define FLUSH() RADEON_NEWPRIM( rmesa )
#define GET_CURRENT_VB_MAX_VERTS() 10
// (((int)rmesa->radeon.dma.current.end - (int)rmesa->radeon.dma.current.ptr) / (rmesa->radeon.swtcl.vertex_size*4))
#define GET_SUBSEQUENT_VB_MAX_VERTS() \
((RADEON_BUFFER_SIZE) / (rmesa->radeon.swtcl.vertex_size*4))
#define ALLOC_VERTS( nr ) radeon_alloc_verts( rmesa, nr, rmesa->radeon.swtcl.vertex_size * 4 )
#define EMIT_VERTS( ctx, j, nr, buf ) \
_tnl_emit_vertices_to_buffer(ctx, j, (j)+(nr), buf)
#define TAG(x) radeon_dma_##x
#include "tnl_dd/t_dd_dmatmp.h"
/**********************************************************************/
/* Render pipeline stage */
/**********************************************************************/
static GLboolean radeon_run_render( struct gl_context *ctx,
struct tnl_pipeline_stage *stage )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
const tnl_render_func *tab = TAG(render_tab_verts);
GLuint i;
if (rmesa->radeon.swtcl.RenderIndex != 0 ||
!radeon_dma_validate_render( ctx, VB ))
return GL_TRUE;
radeon_prepare_render(&rmesa->radeon);
if (rmesa->radeon.NewGLState)
radeonValidateState( ctx );
tnl->Driver.Render.Start( ctx );
for (i = 0 ; i < VB->PrimitiveCount ; i++)
{
GLuint prim = VB->Primitive[i].mode;
GLuint start = VB->Primitive[i].start;
GLuint length = VB->Primitive[i].count;
if (!length)
continue;
radeon_print(RADEON_SWRENDER, RADEON_NORMAL,
"radeon_render.c: prim %s %d..%d\n",
_mesa_enum_to_string(prim & PRIM_MODE_MASK),
start, start+length);
      tab[prim & PRIM_MODE_MASK](ctx, start, length, prim);
}
tnl->Driver.Render.Finish( ctx );
return GL_FALSE; /* finished the pipe */
}
const struct tnl_pipeline_stage _radeon_render_stage =
{
"radeon render",
NULL,
NULL,
NULL,
NULL,
radeon_run_render /* run */
};
/**************************************************************************/
static const GLuint reduced_hw_prim[GL_POLYGON+1] = {
[GL_POINTS] = RADEON_CP_VC_CNTL_PRIM_TYPE_POINT,
[GL_LINES] = RADEON_CP_VC_CNTL_PRIM_TYPE_LINE,
[GL_LINE_LOOP] = RADEON_CP_VC_CNTL_PRIM_TYPE_LINE,
[GL_LINE_STRIP] = RADEON_CP_VC_CNTL_PRIM_TYPE_LINE,
[GL_TRIANGLES] = RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
[GL_TRIANGLE_STRIP] = RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
[GL_TRIANGLE_FAN] = RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
[GL_QUADS] = RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
[GL_QUAD_STRIP] = RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
[GL_POLYGON] = RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST
};
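/* Everything collapses to POINT/LINE/TRI_LIST here because the
 * twosided/unfilled fallback paths emit individual points, lines and
 * triangles rather than strips or fans.
 */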
static void radeonRasterPrimitive( struct gl_context *ctx, GLuint hwprim );
static void radeonRenderPrimitive( struct gl_context *ctx, GLenum prim );
static void radeonResetLineStipple( struct gl_context *ctx );
/***********************************************************************
* Emit primitives as inline vertices *
***********************************************************************/
#undef LOCAL_VARS
#undef ALLOC_VERTS
#define CTX_ARG r100ContextPtr rmesa
#define GET_VERTEX_DWORDS() rmesa->radeon.swtcl.vertex_size
#define ALLOC_VERTS( n, size ) radeon_alloc_verts( rmesa, n, (size) * 4 )
#undef LOCAL_VARS
#define LOCAL_VARS \
r100ContextPtr rmesa = R100_CONTEXT(ctx); \
const char *radeonverts = (char *)rmesa->radeon.swtcl.verts;
#define VERT(x) (radeonVertex *)(radeonverts + ((x) * (vertsize) * sizeof(int)))
#define VERTEX radeonVertex
#undef TAG
#define TAG(x) radeon_##x
#include "tnl_dd/t_dd_triemit.h"
/***********************************************************************
* Macros for t_dd_tritmp.h to draw basic primitives *
***********************************************************************/
#define QUAD( a, b, c, d ) radeon_quad( rmesa, a, b, c, d )
#define TRI( a, b, c ) radeon_triangle( rmesa, a, b, c )
#define LINE( a, b ) radeon_line( rmesa, a, b )
#define POINT( a ) radeon_point( rmesa, a )
/***********************************************************************
* Build render functions from dd templates *
***********************************************************************/
#define RADEON_TWOSIDE_BIT 0x01
#define RADEON_UNFILLED_BIT 0x02
#define RADEON_MAX_TRIFUNC 0x04
static struct {
tnl_points_func points;
tnl_line_func line;
tnl_triangle_func triangle;
tnl_quad_func quad;
} rast_tab[RADEON_MAX_TRIFUNC];
#define DO_FALLBACK 0
#define DO_OFFSET 0
#define DO_UNFILLED ((IND & RADEON_UNFILLED_BIT) != 0)
#define DO_TWOSIDE ((IND & RADEON_TWOSIDE_BIT) != 0)
#define DO_FLAT 0
#define DO_TRI 1
#define DO_QUAD 1
#define DO_LINE 1
#define DO_POINTS 1
#define DO_FULL_QUAD 1
#define HAVE_SPEC 1
#define HAVE_BACK_COLORS 0
#define HAVE_HW_FLATSHADE 1
#define TAB rast_tab
#define DEPTH_SCALE 1.0
#define UNFILLED_TRI unfilled_tri
#define UNFILLED_QUAD unfilled_quad
#define VERT_X(_v) _v->v.x
#define VERT_Y(_v) _v->v.y
#define VERT_Z(_v) _v->v.z
#define AREA_IS_CCW( a ) (a < 0)
#define GET_VERTEX(e) (rmesa->radeon.swtcl.verts + ((e) * rmesa->radeon.swtcl.vertex_size * sizeof(int)))
#define VERT_SET_RGBA( v, c ) \
do { \
radeon_color_t *color = (radeon_color_t *)&((v)->ui[coloroffset]); \
UNCLAMPED_FLOAT_TO_UBYTE(color->red, (c)[0]); \
UNCLAMPED_FLOAT_TO_UBYTE(color->green, (c)[1]); \
UNCLAMPED_FLOAT_TO_UBYTE(color->blue, (c)[2]); \
UNCLAMPED_FLOAT_TO_UBYTE(color->alpha, (c)[3]); \
} while (0)
#define VERT_COPY_RGBA( v0, v1 ) v0->ui[coloroffset] = v1->ui[coloroffset]
#define VERT_SET_SPEC( v, c ) \
do { \
if (specoffset) { \
radeon_color_t *spec = (radeon_color_t *)&((v)->ui[specoffset]); \
UNCLAMPED_FLOAT_TO_UBYTE(spec->red, (c)[0]); \
UNCLAMPED_FLOAT_TO_UBYTE(spec->green, (c)[1]); \
UNCLAMPED_FLOAT_TO_UBYTE(spec->blue, (c)[2]); \
} \
} while (0)
#define VERT_COPY_SPEC( v0, v1 ) \
do { \
if (specoffset) { \
radeon_color_t *spec0 = (radeon_color_t *)&((v0)->ui[specoffset]); \
radeon_color_t *spec1 = (radeon_color_t *)&((v1)->ui[specoffset]); \
spec0->red = spec1->red; \
spec0->green = spec1->green; \
spec0->blue = spec1->blue; \
} \
} while (0)
/* These don't need LE32_TO_CPU() as they used to save and restore
* colors which are already in the correct format.
*/
#define VERT_SAVE_RGBA( idx ) color[idx] = v[idx]->ui[coloroffset]
#define VERT_RESTORE_RGBA( idx ) v[idx]->ui[coloroffset] = color[idx]
#define VERT_SAVE_SPEC( idx ) if (specoffset) spec[idx] = v[idx]->ui[specoffset]
#define VERT_RESTORE_SPEC( idx ) if (specoffset) v[idx]->ui[specoffset] = spec[idx]
#undef LOCAL_VARS
#undef TAG
#undef INIT
#define LOCAL_VARS(n) \
r100ContextPtr rmesa = R100_CONTEXT(ctx); \
GLuint color[n] = {0}, spec[n] = {0}; \
GLuint coloroffset = rmesa->swtcl.coloroffset; \
GLuint specoffset = rmesa->swtcl.specoffset; \
(void) color; (void) spec; (void) coloroffset; (void) specoffset;
/***********************************************************************
* Helpers for rendering unfilled primitives *
***********************************************************************/
#define RASTERIZE(x) radeonRasterPrimitive( ctx, reduced_hw_prim[x] )
#define RENDER_PRIMITIVE rmesa->radeon.swtcl.render_primitive
#undef TAG
#define TAG(x) x
#include "tnl_dd/t_dd_unfilled.h"
#undef IND
/***********************************************************************
* Generate GL render functions *
***********************************************************************/
#define IND (0)
#define TAG(x) x
#include "tnl_dd/t_dd_tritmp.h"
#define IND (RADEON_TWOSIDE_BIT)
#define TAG(x) x##_twoside
#include "tnl_dd/t_dd_tritmp.h"
#define IND (RADEON_UNFILLED_BIT)
#define TAG(x) x##_unfilled
#include "tnl_dd/t_dd_tritmp.h"
#define IND (RADEON_TWOSIDE_BIT|RADEON_UNFILLED_BIT)
#define TAG(x) x##_twoside_unfilled
#include "tnl_dd/t_dd_tritmp.h"
static void init_rast_tab( void )
{
init();
init_twoside();
init_unfilled();
init_twoside_unfilled();
}
/**********************************************************************/
/* Render unclipped begin/end objects */
/**********************************************************************/
#define RENDER_POINTS( start, count ) \
for ( ; start < count ; start++) \
radeon_point( rmesa, VERT(start) )
#define RENDER_LINE( v0, v1 ) \
radeon_line( rmesa, VERT(v0), VERT(v1) )
#define RENDER_TRI( v0, v1, v2 ) \
radeon_triangle( rmesa, VERT(v0), VERT(v1), VERT(v2) )
#define RENDER_QUAD( v0, v1, v2, v3 ) \
radeon_quad( rmesa, VERT(v0), VERT(v1), VERT(v2), VERT(v3) )
#undef INIT
#define INIT(x) do { \
radeonRenderPrimitive( ctx, x ); \
} while (0)
#undef LOCAL_VARS
#define LOCAL_VARS \
r100ContextPtr rmesa = R100_CONTEXT(ctx); \
const GLuint vertsize = rmesa->radeon.swtcl.vertex_size; \
const char *radeonverts = (char *)rmesa->radeon.swtcl.verts; \
const GLuint * const elt = TNL_CONTEXT(ctx)->vb.Elts; \
const GLboolean stipple = ctx->Line.StippleFlag; \
(void) elt; (void) stipple;
#define RESET_STIPPLE if ( stipple ) radeonResetLineStipple( ctx );
#define RESET_OCCLUSION
#define PRESERVE_VB_DEFS
#define ELT(x) (x)
#define TAG(x) radeon_##x##_verts
#include "tnl/t_vb_rendertmp.h"
#undef ELT
#undef TAG
#define TAG(x) radeon_##x##_elts
#define ELT(x) elt[x]
#include "tnl/t_vb_rendertmp.h"
/**********************************************************************/
/* Choose render functions */
/**********************************************************************/
void radeonChooseRenderState( struct gl_context *ctx )
{
TNLcontext *tnl = TNL_CONTEXT(ctx);
r100ContextPtr rmesa = R100_CONTEXT(ctx);
GLuint index = 0;
GLboolean unfilled = (ctx->Polygon.FrontMode != GL_FILL ||
ctx->Polygon.BackMode != GL_FILL);
GLboolean twosided = ctx->Light.Enabled && ctx->Light.Model.TwoSide;
if (!rmesa->radeon.TclFallback || rmesa->radeon.Fallback)
return;
if (twosided)
index |= RADEON_TWOSIDE_BIT;
if (unfilled)
index |= RADEON_UNFILLED_BIT;
if (index != rmesa->radeon.swtcl.RenderIndex) {
tnl->Driver.Render.Points = rast_tab[index].points;
tnl->Driver.Render.Line = rast_tab[index].line;
tnl->Driver.Render.ClippedLine = rast_tab[index].line;
tnl->Driver.Render.Triangle = rast_tab[index].triangle;
tnl->Driver.Render.Quad = rast_tab[index].quad;
if (index == 0) {
tnl->Driver.Render.PrimTabVerts = radeon_render_tab_verts;
tnl->Driver.Render.PrimTabElts = radeon_render_tab_elts;
tnl->Driver.Render.ClippedPolygon = radeon_fast_clipped_poly;
} else {
tnl->Driver.Render.PrimTabVerts = _tnl_render_tab_verts;
tnl->Driver.Render.PrimTabElts = _tnl_render_tab_elts;
tnl->Driver.Render.ClippedPolygon = _tnl_RenderClippedPolygon;
}
rmesa->radeon.swtcl.RenderIndex = index;
}
}
/**********************************************************************/
/* High level hooks for t_vb_render.c */
/**********************************************************************/
static void radeonRasterPrimitive( struct gl_context *ctx, GLuint hwprim )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
if (rmesa->radeon.swtcl.hw_primitive != hwprim) {
RADEON_NEWPRIM( rmesa );
rmesa->radeon.swtcl.hw_primitive = hwprim;
}
}
static void radeonRenderPrimitive( struct gl_context *ctx, GLenum prim )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
GLboolean unfilled = (ctx->Polygon.FrontMode != GL_FILL ||
ctx->Polygon.BackMode != GL_FILL);
rmesa->radeon.swtcl.render_primitive = prim;
if (prim < GL_TRIANGLES || !unfilled)
radeonRasterPrimitive( ctx, reduced_hw_prim[prim] );
}
static void radeonRenderFinish( struct gl_context *ctx )
{
}
static void radeonResetLineStipple( struct gl_context *ctx )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
RADEON_STATECHANGE( rmesa, lin );
}
/**********************************************************************/
/* Transition to/from hardware rasterization. */
/**********************************************************************/
static const char * const fallbackStrings[] = {
"Texture mode",
"glDrawBuffer(GL_FRONT_AND_BACK)",
"glEnable(GL_STENCIL) without hw stencil buffer",
"glRenderMode(selection or feedback)",
"glBlendEquation",
"glBlendFunc",
"RADEON_NO_RAST",
"Mixing GL_CLAMP_TO_BORDER and GL_CLAMP (or GL_MIRROR_CLAMP_ATI)"
};
static const char *getFallbackString(GLuint bit)
{
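   /* Map a single-bit fallback mask (1 << n) to fallbackStrings[n]. */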
int i = 0;
while (bit > 1) {
i++;
bit >>= 1;
}
return fallbackStrings[i];
}
void radeonFallback( struct gl_context *ctx, GLuint bit, GLboolean mode )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
GLuint oldfallback = rmesa->radeon.Fallback;
if (mode) {
rmesa->radeon.Fallback |= bit;
if (oldfallback == 0) {
radeon_firevertices(&rmesa->radeon);
TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_RASTER, GL_TRUE );
_swsetup_Wakeup( ctx );
rmesa->radeon.swtcl.RenderIndex = ~0;
if (RADEON_DEBUG & RADEON_FALLBACKS) {
fprintf(stderr, "Radeon begin rasterization fallback: 0x%x %s\n",
bit, getFallbackString(bit));
}
}
}
else {
rmesa->radeon.Fallback &= ~bit;
if (oldfallback == bit) {
_swrast_flush( ctx );
tnl->Driver.Render.Start = radeonRenderStart;
tnl->Driver.Render.PrimitiveNotify = radeonRenderPrimitive;
tnl->Driver.Render.Finish = radeonRenderFinish;
tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
tnl->Driver.Render.CopyPV = _tnl_copy_pv;
tnl->Driver.Render.Interp = _tnl_interp;
tnl->Driver.Render.ResetLineStipple = radeonResetLineStipple;
TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_RASTER, GL_FALSE );
if (rmesa->radeon.TclFallback) {
/* These are already done if rmesa->radeon.TclFallback goes to
* zero above. But not if it doesn't (RADEON_NO_TCL for
* example?)
*/
_tnl_invalidate_vertex_state( ctx, ~0 );
_tnl_invalidate_vertices( ctx, ~0 );
rmesa->radeon.tnl_index_bitset = 0;
radeonChooseVertexState( ctx );
radeonChooseRenderState( ctx );
}
if (RADEON_DEBUG & RADEON_FALLBACKS) {
fprintf(stderr, "Radeon end rasterization fallback: 0x%x %s\n",
bit, getFallbackString(bit));
}
}
}
}
/**********************************************************************/
/* Initialization. */
/**********************************************************************/
void radeonInitSwtcl( struct gl_context *ctx )
{
TNLcontext *tnl = TNL_CONTEXT(ctx);
r100ContextPtr rmesa = R100_CONTEXT(ctx);
static int firsttime = 1;
if (firsttime) {
init_rast_tab();
firsttime = 0;
}
rmesa->radeon.swtcl.emit_prediction = 0;
tnl->Driver.Render.Start = radeonRenderStart;
tnl->Driver.Render.Finish = radeonRenderFinish;
tnl->Driver.Render.PrimitiveNotify = radeonRenderPrimitive;
tnl->Driver.Render.ResetLineStipple = radeonResetLineStipple;
tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
tnl->Driver.Render.CopyPV = _tnl_copy_pv;
tnl->Driver.Render.Interp = _tnl_interp;
_tnl_init_vertices( ctx, ctx->Const.MaxArrayLockSize + 12,
RADEON_MAX_TNL_VERTEX_SIZE);
rmesa->radeon.swtcl.verts = (GLubyte *)tnl->clipspace.vertex_buf;
rmesa->radeon.swtcl.RenderIndex = ~0;
rmesa->radeon.swtcl.render_primitive = GL_TRIANGLES;
rmesa->radeon.swtcl.hw_primitive = 0;
}

View File

@ -1,66 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
on the rights to use, copy, modify, merge, publish, distribute, sub
license, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
ATI, VA LINUX SYSTEMS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keithw@vmware.com>
*
*/
#ifndef __RADEON_TRIS_H__
#define __RADEON_TRIS_H__
#include "main/mtypes.h"
#include "swrast/swrast.h"
#include "radeon_context.h"
extern void radeonInitSwtcl( struct gl_context *ctx );
extern void radeonChooseRenderState( struct gl_context *ctx );
extern void radeonChooseVertexState( struct gl_context *ctx );
extern void radeonCheckTexSizes( struct gl_context *ctx );
extern void radeonBuildVertices( struct gl_context *ctx, GLuint start, GLuint count,
GLuint newinputs );
extern void radeonPrintSetupFlags(char *msg, GLuint flags );
extern void radeon_emit_indexed_verts( struct gl_context *ctx,
GLuint start,
GLuint count );
extern void radeon_translate_vertex( struct gl_context *ctx,
const radeonVertex *src,
SWvertex *dst );
extern void radeon_print_vertex( struct gl_context *ctx, const radeonVertex *v );
extern void r100_swtcl_flush(struct gl_context *ctx, uint32_t current_offset);
#endif

View File

@ -1,565 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VMware, Inc.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keithw@vmware.com>
*/
#include "main/glheader.h"
#include "main/mtypes.h"
#include "main/light.h"
#include "main/enums.h"
#include "main/state.h"
#include "util/macros.h"
#include "vbo/vbo.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "radeon_common.h"
#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tcl.h"
#include "radeon_swtcl.h"
#include "radeon_maos.h"
#include "radeon_common_context.h"
/*
* Render unclipped vertex buffers by emitting vertices directly to
* dma buffers. Use strip/fan hardware primitives where possible.
* Try to simulate missing primitives with indexed vertices.
*/
#define HAVE_POINTS 1
#define HAVE_LINES 1
#define HAVE_LINE_LOOP 0
#define HAVE_LINE_STRIPS 1
#define HAVE_TRIANGLES 1
#define HAVE_TRI_STRIPS 1
#define HAVE_TRI_FANS 1
#define HAVE_QUADS 0
#define HAVE_QUAD_STRIPS 0
#define HAVE_POLYGONS 1
#define HAVE_ELTS 1
#define HW_POINTS RADEON_CP_VC_CNTL_PRIM_TYPE_POINT
#define HW_LINES RADEON_CP_VC_CNTL_PRIM_TYPE_LINE
#define HW_LINE_LOOP 0
#define HW_LINE_STRIP RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP
#define HW_TRIANGLES RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST
#define HW_TRIANGLE_STRIP_0 RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP
#define HW_TRIANGLE_STRIP_1 0
#define HW_TRIANGLE_FAN RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN
#define HW_QUADS 0
#define HW_QUAD_STRIP 0
#define HW_POLYGON RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN
static GLboolean discrete_prim[0x10] = {
0, /* 0 none */
1, /* 1 points */
1, /* 2 lines */
0, /* 3 line_strip */
1, /* 4 tri_list */
0, /* 5 tri_fan */
0, /* 6 tri_type2 */
1, /* 7 rect list (unused) */
1, /* 8 3vert point */
1, /* 9 3vert line */
0,
0,
0,
0,
0,
0,
};
#define LOCAL_VARS r100ContextPtr rmesa = R100_CONTEXT(ctx)
#define ELT_TYPE GLushort
#define ELT_INIT(prim, hw_prim) \
radeonTclPrimitive( ctx, prim, hw_prim | RADEON_CP_VC_CNTL_PRIM_WALK_IND )
#define GET_MESA_ELTS() rmesa->tcl.Elts
/* Don't really know how many elts will fit in what's left of cmdbuf,
* as there is state to emit, etc:
*/
/* Testing on isosurf shows a maximum around here. Don't know if it's
* the card or driver or kernel module that is causing the behaviour.
*/
#define GET_MAX_HW_ELTS() 300
#define RESET_STIPPLE() do { \
RADEON_STATECHANGE( rmesa, lin ); \
radeonEmitState(&rmesa->radeon); \
} while (0)
#define AUTO_STIPPLE( mode ) do { \
RADEON_STATECHANGE( rmesa, lin ); \
if (mode) \
rmesa->hw.lin.cmd[LIN_RE_LINE_PATTERN] |= \
RADEON_LINE_PATTERN_AUTO_RESET; \
else \
rmesa->hw.lin.cmd[LIN_RE_LINE_PATTERN] &= \
~RADEON_LINE_PATTERN_AUTO_RESET; \
radeonEmitState(&rmesa->radeon); \
} while (0)
#define ALLOC_ELTS(nr) radeonAllocElts( rmesa, nr )
static GLushort *radeonAllocElts( r100ContextPtr rmesa, GLuint nr )
{
if (rmesa->radeon.dma.flush)
rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
radeonEmitAOS( rmesa,
rmesa->radeon.tcl.aos_count, 0 );
return radeonAllocEltsOpenEnded( rmesa, rmesa->tcl.vertex_format,
rmesa->tcl.hw_primitive, nr );
}
#define CLOSE_ELTS() if (0) RADEON_NEWPRIM( rmesa )
/* TODO: Try to extend existing primitive if both are identical,
* discrete and there are no intervening state changes. (Somewhat
* duplicates changes to DrawArrays code)
*/
static void radeonEmitPrim( struct gl_context *ctx,
GLenum prim,
GLuint hwprim,
GLuint start,
GLuint count)
{
r100ContextPtr rmesa = R100_CONTEXT( ctx );
radeonTclPrimitive( ctx, prim, hwprim );
radeonEmitAOS( rmesa,
rmesa->radeon.tcl.aos_count,
start );
/* Why couldn't this packet have taken an offset param?
*/
radeonEmitVbufPrim( rmesa,
rmesa->tcl.vertex_format,
rmesa->tcl.hw_primitive,
count - start );
}
#define EMIT_PRIM( ctx, prim, hwprim, start, count ) do { \
radeonEmitPrim( ctx, prim, hwprim, start, count ); \
(void) rmesa; } while (0)
#define MAX_CONVERSION_SIZE 40
/* Try & join small primitives
*/
#if 0
#define PREFER_DISCRETE_ELT_PRIM( NR, PRIM ) 0
#else
#define PREFER_DISCRETE_ELT_PRIM( NR, PRIM ) \
((NR) < 20 || \
((NR) < 40 && \
rmesa->tcl.hw_primitive == (PRIM| \
RADEON_CP_VC_CNTL_PRIM_WALK_IND| \
RADEON_CP_VC_CNTL_TCL_ENABLE)))
#endif
#ifdef MESA_BIG_ENDIAN
/* We could do without (most of) this ugliness if dest was always 32 bit word aligned... */
#define EMIT_ELT(dest, offset, x) do { \
int off = offset + ( ( (uintptr_t)dest & 0x2 ) >> 1 ); \
GLushort *des = (GLushort *)( (uintptr_t)dest & ~0x2 ); \
(des)[ off + 1 - 2 * ( off & 1 ) ] = (GLushort)(x); \
(void)rmesa; } while (0)
#else
#define EMIT_ELT(dest, offset, x) do { \
(dest)[offset] = (GLushort) (x); \
(void)rmesa; } while (0)
#endif
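/* Illustrative worked example (not in the original source): for a
 * dword-aligned dest the index map "off + 1 - 2 * (off & 1)" sends
 * 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, i.e. the two GLushorts inside each
 * 32-bit word are swapped so the little-endian dwords the hardware reads
 * carry the elements in their intended order. The (uintptr_t)dest fixup
 * handles a dest that starts on the second GLushort of a dword.
 */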
#define EMIT_TWO_ELTS(dest, offset, x, y) *(GLuint *)(dest+offset) = ((y)<<16)|(x);
#define TAG(x) tcl_##x
#include "tnl_dd/t_dd_dmatmp2.h"
/**********************************************************************/
/* External entrypoints */
/**********************************************************************/
void radeonEmitPrimitive( struct gl_context *ctx,
GLuint first,
GLuint last,
GLuint flags )
{
tcl_render_tab_verts[flags&PRIM_MODE_MASK]( ctx, first, last, flags );
}
void radeonEmitEltPrimitive( struct gl_context *ctx,
GLuint first,
GLuint last,
GLuint flags )
{
tcl_render_tab_elts[flags&PRIM_MODE_MASK]( ctx, first, last, flags );
}
void radeonTclPrimitive( struct gl_context *ctx,
GLenum prim,
int hw_prim )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
GLuint se_cntl;
GLuint newprim = hw_prim | RADEON_CP_VC_CNTL_TCL_ENABLE;
radeon_prepare_render(&rmesa->radeon);
if (rmesa->radeon.NewGLState)
radeonValidateState( ctx );
if (newprim != rmesa->tcl.hw_primitive ||
!discrete_prim[hw_prim&0xf]) {
RADEON_NEWPRIM( rmesa );
rmesa->tcl.hw_primitive = newprim;
}
se_cntl = rmesa->hw.set.cmd[SET_SE_CNTL];
se_cntl &= ~RADEON_FLAT_SHADE_VTX_LAST;
if (prim == GL_POLYGON && ctx->Light.ShadeModel == GL_FLAT)
se_cntl |= RADEON_FLAT_SHADE_VTX_0;
else
se_cntl |= RADEON_FLAT_SHADE_VTX_LAST;
if (se_cntl != rmesa->hw.set.cmd[SET_SE_CNTL]) {
RADEON_STATECHANGE( rmesa, set );
rmesa->hw.set.cmd[SET_SE_CNTL] = se_cntl;
}
}
/**
* Predict the total emit size for the next rendering operation so there is no flush in the middle of rendering.
* The prediction has to be pessimistic: aim for the tightest value that is still no smaller than the worst case.
*/
static GLuint radeonEnsureEmitSize( struct gl_context * ctx , GLuint inputs )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
GLuint space_required;
GLuint state_size;
GLuint nr_aos = 1; /* radeonEmitArrays always emits one */
int i;
/* list of flags that are allocating aos object */
const GLuint flags_to_check[] = {
VERT_BIT_NORMAL,
VERT_BIT_COLOR0,
VERT_BIT_COLOR1,
VERT_BIT_FOG
};
/* predict number of aos to emit */
for (i=0; i < ARRAY_SIZE(flags_to_check); ++i)
{
if (inputs & flags_to_check[i])
++nr_aos;
}
for (i = 0; i < ctx->Const.MaxTextureUnits; ++i)
{
if (inputs & VERT_BIT_TEX(i))
++nr_aos;
}
{
/* count the prediction for state size */
space_required = 0;
state_size = radeonCountStateEmitSize( &rmesa->radeon );
/* tcl may be changed in radeonEmitArrays so account for it if not dirty */
if (!rmesa->hw.tcl.dirty)
state_size += rmesa->hw.tcl.check( &rmesa->radeon.glCtx, &rmesa->hw.tcl );
/* predict size for elements */
for (i = 0; i < VB->PrimitiveCount; ++i)
{
/* If primitive.count is less than MAX_CONVERSION_SIZE
the rendering code may decide to convert to elts.
In that case we have to make a pessimistic prediction
and use the larger of the two paths. */
const GLuint elts = ELTS_BUFSZ(nr_aos);
const GLuint index = INDEX_BUFSZ;
const GLuint vbuf = VBUF_BUFSZ;
if (!VB->Primitive[i].count)
continue;
if ( (!VB->Elts && VB->Primitive[i].count >= MAX_CONVERSION_SIZE)
|| vbuf > index + elts)
space_required += vbuf;
else
space_required += index + elts;
space_required += VB->Primitive[i].count * 3;
space_required += AOS_BUFSZ(nr_aos);
}
space_required += SCISSOR_BUFSZ;
}
/* flush the buffer in case we need more than is left. */
if (rcommonEnsureCmdBufSpace(&rmesa->radeon, space_required, __func__))
return space_required + radeonCountStateEmitSize( &rmesa->radeon );
else
return space_required + state_size;
}
/**********************************************************************/
/* Render pipeline stage */
/**********************************************************************/
/* TCL render.
*/
static GLboolean radeon_run_tcl_render( struct gl_context *ctx,
struct tnl_pipeline_stage *stage )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
GLuint inputs = VERT_BIT_POS | VERT_BIT_COLOR0;
GLuint i;
GLuint emit_end;
/* TODO: separate this from the swtnl pipeline
*/
if (rmesa->radeon.TclFallback)
return GL_TRUE; /* fallback to software t&l */
if (VB->Count == 0)
return GL_FALSE;
/* NOTE: inputs != tnl->render_inputs - these are the untransformed
* inputs.
*/
if (ctx->Light.Enabled) {
inputs |= VERT_BIT_NORMAL;
}
if (_mesa_need_secondary_color(ctx)) {
inputs |= VERT_BIT_COLOR1;
}
if ( (ctx->Fog.FogCoordinateSource == GL_FOG_COORD) && ctx->Fog.Enabled ) {
inputs |= VERT_BIT_FOG;
}
for (i = 0 ; i < ctx->Const.MaxTextureUnits; i++) {
if (ctx->Texture.Unit[i]._Current) {
/* TODO: probably should not emit texture coords when texgen is enabled */
if (rmesa->TexGenNeedNormals[i]) {
inputs |= VERT_BIT_NORMAL;
}
inputs |= VERT_BIT_TEX(i);
}
}
radeonReleaseArrays( ctx, ~0 );
emit_end = radeonEnsureEmitSize( ctx, inputs )
+ rmesa->radeon.cmdbuf.cs->cdw;
radeonEmitArrays( ctx, inputs );
rmesa->tcl.Elts = VB->Elts;
for (i = 0 ; i < VB->PrimitiveCount ; i++)
{
GLuint prim = _tnl_translate_prim(&VB->Primitive[i]);
GLuint start = VB->Primitive[i].start;
GLuint length = VB->Primitive[i].count;
if (!length)
continue;
if (rmesa->tcl.Elts)
radeonEmitEltPrimitive( ctx, start, start+length, prim );
else
radeonEmitPrimitive( ctx, start, start+length, prim );
}
if (emit_end < rmesa->radeon.cmdbuf.cs->cdw)
WARN_ONCE("Rendering was %d commands larger than predicted size."
" We might overflow command buffer.\n", rmesa->radeon.cmdbuf.cs->cdw - emit_end);
return GL_FALSE; /* finished the pipe */
}
/* Initial state for tcl stage.
*/
const struct tnl_pipeline_stage _radeon_tcl_stage =
{
"radeon render",
NULL,
NULL,
NULL,
NULL,
radeon_run_tcl_render /* run */
};
/**********************************************************************/
/* Validate state at pipeline start */
/**********************************************************************/
/*-----------------------------------------------------------------------
* Manage TCL fallbacks
*/
static void transition_to_swtnl( struct gl_context *ctx )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
GLuint se_cntl;
RADEON_NEWPRIM( rmesa );
rmesa->swtcl.vertex_format = 0;
radeonChooseVertexState( ctx );
radeonChooseRenderState( ctx );
_tnl_validate_shine_tables( ctx );
tnl->Driver.NotifyMaterialChange =
_tnl_validate_shine_tables;
radeonReleaseArrays( ctx, ~0 );
se_cntl = rmesa->hw.set.cmd[SET_SE_CNTL];
se_cntl |= RADEON_FLAT_SHADE_VTX_LAST;
if (se_cntl != rmesa->hw.set.cmd[SET_SE_CNTL]) {
RADEON_STATECHANGE( rmesa, set );
rmesa->hw.set.cmd[SET_SE_CNTL] = se_cntl;
}
}
static void transition_to_hwtnl( struct gl_context *ctx )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];
se_coord_fmt &= ~(RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
RADEON_VTX_W0_IS_NOT_1_OVER_W0);
se_coord_fmt |= RADEON_VTX_W0_IS_NOT_1_OVER_W0;
if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
RADEON_STATECHANGE( rmesa, set );
rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
_tnl_need_projected_coords( ctx, GL_FALSE );
}
radeonUpdateMaterial( ctx );
tnl->Driver.NotifyMaterialChange = radeonUpdateMaterial;
if ( rmesa->radeon.dma.flush )
rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
rmesa->radeon.dma.flush = NULL;
rmesa->swtcl.vertex_format = 0;
// if (rmesa->swtcl.indexed_verts.buf)
// radeonReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts,
// __func__ );
if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "Radeon end tcl fallback\n");
}
static char *fallbackStrings[] = {
"Rasterization fallback",
"Unfilled triangles",
"Twosided lighting, differing materials",
"Materials in VB (maybe between begin/end)",
"Texgen unit 0",
"Texgen unit 1",
"Texgen unit 2",
"User disable",
"Fogcoord with separate specular lighting"
};
static char *getFallbackString(GLuint bit)
{
int i = 0;
while (bit > 1) {
i++;
bit >>= 1;
}
return fallbackStrings[i];
}
void radeonTclFallback( struct gl_context *ctx, GLuint bit, GLboolean mode )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
GLuint oldfallback = rmesa->radeon.TclFallback;
if (mode) {
rmesa->radeon.TclFallback |= bit;
if (oldfallback == 0) {
if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "Radeon begin tcl fallback %s\n",
getFallbackString( bit ));
transition_to_swtnl( ctx );
}
}
else {
rmesa->radeon.TclFallback &= ~bit;
if (oldfallback == bit) {
if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "Radeon end tcl fallback %s\n",
getFallbackString( bit ));
transition_to_hwtnl( ctx );
}
}
}

View File

@ -1,64 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VMware, Inc.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keithw@vmware.com>
*
*/
#ifndef __RADEON_TCL_H__
#define __RADEON_TCL_H__
#include "radeon_context.h"
extern void radeonTclPrimitive( struct gl_context *ctx, GLenum prim, int hw_prim );
extern void radeonEmitEltPrimitive( struct gl_context *ctx, GLuint first, GLuint last,
GLuint flags );
extern void radeonEmitPrimitive( struct gl_context *ctx, GLuint first, GLuint last,
GLuint flags );
extern void radeonTclFallback( struct gl_context *ctx, GLuint bit, GLboolean mode );
#define RADEON_TCL_FALLBACK_RASTER 0x1 /* rasterization */
#define RADEON_TCL_FALLBACK_UNFILLED 0x2 /* unfilled tris */
#define RADEON_TCL_FALLBACK_LIGHT_TWOSIDE 0x4 /* twoside tris */
#define RADEON_TCL_FALLBACK_MATERIAL 0x8 /* material in vb */
#define RADEON_TCL_FALLBACK_TEXGEN_0 0x10 /* texgen, unit 0 */
#define RADEON_TCL_FALLBACK_TEXGEN_1 0x20 /* texgen, unit 1 */
#define RADEON_TCL_FALLBACK_TEXGEN_2 0x40 /* texgen, unit 2 */
#define RADEON_TCL_FALLBACK_TCL_DISABLE 0x80 /* user disable */
#define RADEON_TCL_FALLBACK_FOGCOORDSPEC 0x100 /* fogcoord, sep. spec light */
/* max maos_verts vertex format has a size of 18 floats */
#define RADEON_MAX_TCL_VERTSIZE (18*4)
#define TCL_FALLBACK( ctx, bit, mode ) radeonTclFallback( ctx, bit, mode )
#endif

View File

@ -1,453 +0,0 @@
/*
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Brian Paul <brianp@valinux.com>
*/
#include "main/glheader.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/image.h"
#include "main/teximage.h"
#include "main/texobj.h"
#include "radeon_context.h"
#include "radeon_mipmap_tree.h"
#include "radeon_ioctl.h"
#include "radeon_tex.h"
#include "util/u_memory.h"
#include "util/driconf.h"
/**
* Set the texture wrap modes.
*
* \param t Texture object whose wrap modes are to be set
* \param swrap Wrap mode for the \a s texture coordinate
* \param twrap Wrap mode for the \a t texture coordinate
*/
static void radeonSetTexWrap( radeonTexObjPtr t, GLenum swrap, GLenum twrap )
{
GLboolean is_clamp = GL_FALSE;
GLboolean is_clamp_to_border = GL_FALSE;
t->pp_txfilter &= ~(RADEON_CLAMP_S_MASK | RADEON_CLAMP_T_MASK | RADEON_BORDER_MODE_D3D);
switch ( swrap ) {
case GL_REPEAT:
t->pp_txfilter |= RADEON_CLAMP_S_WRAP;
break;
case GL_CLAMP:
t->pp_txfilter |= RADEON_CLAMP_S_CLAMP_GL;
is_clamp = GL_TRUE;
break;
case GL_CLAMP_TO_EDGE:
t->pp_txfilter |= RADEON_CLAMP_S_CLAMP_LAST;
break;
case GL_CLAMP_TO_BORDER:
t->pp_txfilter |= RADEON_CLAMP_S_CLAMP_GL;
is_clamp_to_border = GL_TRUE;
break;
case GL_MIRRORED_REPEAT:
t->pp_txfilter |= RADEON_CLAMP_S_MIRROR;
break;
case GL_MIRROR_CLAMP_EXT:
t->pp_txfilter |= RADEON_CLAMP_S_MIRROR_CLAMP_GL;
is_clamp = GL_TRUE;
break;
case GL_MIRROR_CLAMP_TO_EDGE_EXT:
t->pp_txfilter |= RADEON_CLAMP_S_MIRROR_CLAMP_LAST;
break;
case GL_MIRROR_CLAMP_TO_BORDER_EXT:
t->pp_txfilter |= RADEON_CLAMP_S_MIRROR_CLAMP_GL;
is_clamp_to_border = GL_TRUE;
break;
default:
_mesa_problem(NULL, "bad S wrap mode in %s", __func__);
}
if (t->base.Target != GL_TEXTURE_1D) {
switch ( twrap ) {
case GL_REPEAT:
t->pp_txfilter |= RADEON_CLAMP_T_WRAP;
break;
case GL_CLAMP:
t->pp_txfilter |= RADEON_CLAMP_T_CLAMP_GL;
is_clamp = GL_TRUE;
break;
case GL_CLAMP_TO_EDGE:
t->pp_txfilter |= RADEON_CLAMP_T_CLAMP_LAST;
break;
case GL_CLAMP_TO_BORDER:
t->pp_txfilter |= RADEON_CLAMP_T_CLAMP_GL;
is_clamp_to_border = GL_TRUE;
break;
case GL_MIRRORED_REPEAT:
t->pp_txfilter |= RADEON_CLAMP_T_MIRROR;
break;
case GL_MIRROR_CLAMP_EXT:
t->pp_txfilter |= RADEON_CLAMP_T_MIRROR_CLAMP_GL;
is_clamp = GL_TRUE;
break;
case GL_MIRROR_CLAMP_TO_EDGE_EXT:
t->pp_txfilter |= RADEON_CLAMP_T_MIRROR_CLAMP_LAST;
break;
case GL_MIRROR_CLAMP_TO_BORDER_EXT:
t->pp_txfilter |= RADEON_CLAMP_T_MIRROR_CLAMP_GL;
is_clamp_to_border = GL_TRUE;
break;
default:
_mesa_problem(NULL, "bad T wrap mode in %s", __func__);
}
}
if ( is_clamp_to_border ) {
t->pp_txfilter |= RADEON_BORDER_MODE_D3D;
}
t->border_fallback = (is_clamp && is_clamp_to_border);
}
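/* Illustrative note (not in the original source): border_fallback is set
 * only when one coordinate uses a GL_CLAMP-style mode while the other uses
 * clamp-to-border. RADEON_BORDER_MODE_D3D is a single per-texture bit, so
 * the mixed case cannot be expressed in hardware and triggers the
 * "Mixing GL_CLAMP_TO_BORDER and GL_CLAMP" rasterization fallback.
 */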
static void radeonSetTexMaxAnisotropy( radeonTexObjPtr t, GLfloat max )
{
t->pp_txfilter &= ~RADEON_MAX_ANISO_MASK;
if ( max == 1.0 ) {
t->pp_txfilter |= RADEON_MAX_ANISO_1_TO_1;
} else if ( max <= 2.0 ) {
t->pp_txfilter |= RADEON_MAX_ANISO_2_TO_1;
} else if ( max <= 4.0 ) {
t->pp_txfilter |= RADEON_MAX_ANISO_4_TO_1;
} else if ( max <= 8.0 ) {
t->pp_txfilter |= RADEON_MAX_ANISO_8_TO_1;
} else {
t->pp_txfilter |= RADEON_MAX_ANISO_16_TO_1;
}
}
/**
* Set the texture magnification and minification modes.
*
* \param t Texture whose filter modes are to be set
* \param minf Texture minification mode
* \param magf Texture magnification mode
*/
static void radeonSetTexFilter( radeonTexObjPtr t, GLenum minf, GLenum magf )
{
GLuint anisotropy = (t->pp_txfilter & RADEON_MAX_ANISO_MASK);
/* Force revalidation to account for switches from/to mipmapping. */
t->validated = GL_FALSE;
t->pp_txfilter &= ~(RADEON_MIN_FILTER_MASK | RADEON_MAG_FILTER_MASK);
/* r100 chips can't handle mipmaps/aniso for cubemap/volume textures */
if ( t->base.Target == GL_TEXTURE_CUBE_MAP ) {
switch ( minf ) {
case GL_NEAREST:
case GL_NEAREST_MIPMAP_NEAREST:
case GL_NEAREST_MIPMAP_LINEAR:
t->pp_txfilter |= RADEON_MIN_FILTER_NEAREST;
break;
case GL_LINEAR:
case GL_LINEAR_MIPMAP_NEAREST:
case GL_LINEAR_MIPMAP_LINEAR:
t->pp_txfilter |= RADEON_MIN_FILTER_LINEAR;
break;
default:
break;
}
}
else if ( anisotropy == RADEON_MAX_ANISO_1_TO_1 ) {
switch ( minf ) {
case GL_NEAREST:
t->pp_txfilter |= RADEON_MIN_FILTER_NEAREST;
break;
case GL_LINEAR:
t->pp_txfilter |= RADEON_MIN_FILTER_LINEAR;
break;
case GL_NEAREST_MIPMAP_NEAREST:
t->pp_txfilter |= RADEON_MIN_FILTER_NEAREST_MIP_NEAREST;
break;
case GL_NEAREST_MIPMAP_LINEAR:
t->pp_txfilter |= RADEON_MIN_FILTER_LINEAR_MIP_NEAREST;
break;
case GL_LINEAR_MIPMAP_NEAREST:
t->pp_txfilter |= RADEON_MIN_FILTER_NEAREST_MIP_LINEAR;
break;
case GL_LINEAR_MIPMAP_LINEAR:
t->pp_txfilter |= RADEON_MIN_FILTER_LINEAR_MIP_LINEAR;
break;
}
} else {
switch ( minf ) {
case GL_NEAREST:
t->pp_txfilter |= RADEON_MIN_FILTER_ANISO_NEAREST;
break;
case GL_LINEAR:
t->pp_txfilter |= RADEON_MIN_FILTER_ANISO_LINEAR;
break;
case GL_NEAREST_MIPMAP_NEAREST:
case GL_LINEAR_MIPMAP_NEAREST:
t->pp_txfilter |= RADEON_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST;
break;
case GL_NEAREST_MIPMAP_LINEAR:
case GL_LINEAR_MIPMAP_LINEAR:
t->pp_txfilter |= RADEON_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR;
break;
}
}
switch ( magf ) {
case GL_NEAREST:
t->pp_txfilter |= RADEON_MAG_FILTER_NEAREST;
break;
case GL_LINEAR:
t->pp_txfilter |= RADEON_MAG_FILTER_LINEAR;
break;
}
}
static void radeonSetTexBorderColor( radeonTexObjPtr t, const GLfloat color[4] )
{
GLubyte c[4];
CLAMPED_FLOAT_TO_UBYTE(c[0], color[0]);
CLAMPED_FLOAT_TO_UBYTE(c[1], color[1]);
CLAMPED_FLOAT_TO_UBYTE(c[2], color[2]);
CLAMPED_FLOAT_TO_UBYTE(c[3], color[3]);
t->pp_border_color = radeonPackColor( 4, c[0], c[1], c[2], c[3] );
}
#define SCALED_FLOAT_TO_BYTE( x, scale ) \
(((GLuint)((255.0F / scale) * (x))) / 2)
static void radeonTexEnv( struct gl_context *ctx, GLenum target,
GLenum pname, const GLfloat *param )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
GLuint unit = ctx->Texture.CurrentUnit;
struct gl_fixedfunc_texture_unit *texUnit =
&ctx->Texture.FixedFuncUnit[unit];
if ( RADEON_DEBUG & RADEON_STATE ) {
fprintf( stderr, "%s( %s )\n",
__func__, _mesa_enum_to_string( pname ) );
}
switch ( pname ) {
case GL_TEXTURE_ENV_COLOR: {
GLubyte c[4];
GLuint envColor;
_mesa_unclamped_float_rgba_to_ubyte(c, texUnit->EnvColor);
envColor = radeonPackColor( 4, c[0], c[1], c[2], c[3] );
if ( rmesa->hw.tex[unit].cmd[TEX_PP_TFACTOR] != envColor ) {
RADEON_STATECHANGE( rmesa, tex[unit] );
rmesa->hw.tex[unit].cmd[TEX_PP_TFACTOR] = envColor;
}
break;
}
case GL_TEXTURE_LOD_BIAS_EXT: {
GLfloat bias, min;
GLuint b;
/* The Radeon's LOD bias is a signed 2's complement value with a
* range of -1.0 <= bias < 4.0. We break this into two linear
* functions, one mapping [-1.0,0.0] to [-128,0] and one mapping
* [0.0,4.0] to [0,127].
*/
min = driQueryOptionb (&rmesa->radeon.optionCache, "no_neg_lod_bias") ?
0.0 : -1.0;
bias = CLAMP( *param, min, 4.0 );
if ( bias == 0 ) {
b = 0;
} else if ( bias > 0 ) {
b = ((GLuint)SCALED_FLOAT_TO_BYTE( bias, 4.0 )) << RADEON_LOD_BIAS_SHIFT;
} else {
b = ((GLuint)SCALED_FLOAT_TO_BYTE( bias, 1.0 )) << RADEON_LOD_BIAS_SHIFT;
}
if ( (rmesa->hw.tex[unit].cmd[TEX_PP_TXFILTER] & RADEON_LOD_BIAS_MASK) != b ) {
RADEON_STATECHANGE( rmesa, tex[unit] );
rmesa->hw.tex[unit].cmd[TEX_PP_TXFILTER] &= ~RADEON_LOD_BIAS_MASK;
rmesa->hw.tex[unit].cmd[TEX_PP_TXFILTER] |= (b & RADEON_LOD_BIAS_MASK);
}
break;
}
default:
return;
}
}
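/* Illustrative worked example (not in the original source): for
 * bias = 2.0 the positive branch evaluates
 *   SCALED_FLOAT_TO_BYTE(2.0, 4.0) = ((GLuint)((255.0F / 4.0) * 2.0)) / 2
 *                                  = 127 / 2 = 63,
 * so b = 63 << RADEON_LOD_BIAS_SHIFT -- about half of the [0,127] span,
 * matching a bias halfway through [0.0, 4.0]. The negative branch scales
 * [-1.0, 0.0] onto [-128, 0] of the two's complement byte instead.
 */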
void radeonTexUpdateParameters(struct gl_context *ctx, GLuint unit)
{
struct gl_sampler_object *samp = _mesa_get_samplerobj(ctx, unit);
radeonTexObj* t = radeon_tex_obj(ctx->Texture.Unit[unit]._Current);
radeonSetTexMaxAnisotropy(t , samp->Attrib.MaxAnisotropy);
radeonSetTexFilter(t, samp->Attrib.MinFilter, samp->Attrib.MagFilter);
radeonSetTexWrap(t, samp->Attrib.WrapS, samp->Attrib.WrapT);
radeonSetTexBorderColor(t, samp->Attrib.state.border_color.f);
}
/**
* Changes variables and flags for a state update, which will happen at the
* next UpdateTextureState
*/
static void radeonTexParameter( struct gl_context *ctx,
struct gl_texture_object *texObj,
GLenum pname )
{
radeonTexObj* t = radeon_tex_obj(texObj);
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE, "%s( %s )\n", __func__,
_mesa_enum_to_string( pname ) );
switch ( pname ) {
case GL_ALL_ATTRIB_BITS: /* meaning is all pnames, internal */
case GL_TEXTURE_BASE_LEVEL:
case GL_TEXTURE_MAX_LEVEL:
case GL_TEXTURE_MIN_LOD:
case GL_TEXTURE_MAX_LOD:
t->validated = GL_FALSE;
break;
default:
return;
}
}
static void radeonDeleteTexture( struct gl_context *ctx,
struct gl_texture_object *texObj )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
radeonTexObj* t = radeon_tex_obj(texObj);
int i;
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s( %p (target = %s) )\n", __func__, (void *)texObj,
_mesa_enum_to_string( texObj->Target ) );
if ( rmesa ) {
radeon_firevertices(&rmesa->radeon);
for ( i = 0 ; i < rmesa->radeon.glCtx.Const.MaxTextureUnits ; i++ ) {
if ( t == rmesa->state.texture.unit[i].texobj ) {
rmesa->state.texture.unit[i].texobj = NULL;
rmesa->hw.tex[i].dirty = GL_FALSE;
rmesa->hw.cube[i].dirty = GL_FALSE;
}
}
}
radeon_miptree_unreference(&t->mt);
/* Free mipmap images and the texture object itself */
_mesa_delete_texture_object(ctx, texObj);
}
/* Need:
* - Same GEN_MODE for all active bits
* - Same EyePlane/ObjPlane for all active bits when using Eye/Obj
* - STRQ presumably all supported (matrix means incoming R values
* can end up in STQ, this has implications for vertex support,
* presumably ok if maos is used, though?)
*
* Basically impossible to do this on the fly - just collect some
* basic info & do the checks from ValidateState().
*/
static void radeonTexGen( struct gl_context *ctx,
GLenum coord,
GLenum pname,
const GLfloat *params )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
GLuint unit = ctx->Texture.CurrentUnit;
rmesa->recheck_texgen[unit] = GL_TRUE;
}
/**
* Allocate a new texture object.
* Called via ctx->Driver.NewTextureObject.
* Note: we could use containment here to 'derive' the driver-specific
* texture object from the core mesa gl_texture_object. Not done at this time.
*/
static struct gl_texture_object *
radeonNewTextureObject( struct gl_context *ctx, GLuint name, GLenum target )
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
radeonTexObj* t = CALLOC_STRUCT(radeon_tex_obj);
_mesa_initialize_texture_object(ctx, &t->base, name, target);
t->base.Sampler.Attrib.MaxAnisotropy = rmesa->radeon.initialMaxAnisotropy;
t->border_fallback = GL_FALSE;
t->pp_txfilter = RADEON_BORDER_MODE_OGL;
t->pp_txformat = (RADEON_TXFORMAT_ENDIAN_NO_SWAP |
RADEON_TXFORMAT_PERSPECTIVE_ENABLE);
radeonSetTexWrap( t, t->base.Sampler.Attrib.WrapS, t->base.Sampler.Attrib.WrapT );
radeonSetTexMaxAnisotropy( t, t->base.Sampler.Attrib.MaxAnisotropy );
radeonSetTexFilter( t, t->base.Sampler.Attrib.MinFilter, t->base.Sampler.Attrib.MagFilter );
radeonSetTexBorderColor( t, t->base.Sampler.Attrib.state.border_color.f );
return &t->base;
}
static struct gl_sampler_object *
radeonNewSamplerObject(struct gl_context *ctx, GLuint name)
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
struct gl_sampler_object *samp = _mesa_new_sampler_object(ctx, name);
if (samp)
samp->Attrib.MaxAnisotropy = rmesa->radeon.initialMaxAnisotropy;
return samp;
}
void radeonInitTextureFuncs( radeonContextPtr radeon, struct dd_function_table *functions )
{
radeon_init_common_texture_funcs(radeon, functions);
functions->NewTextureObject = radeonNewTextureObject;
// functions->BindTexture = radeonBindTexture;
functions->DeleteTexture = radeonDeleteTexture;
functions->TexEnv = radeonTexEnv;
functions->TexParameter = radeonTexParameter;
functions->TexGen = radeonTexGen;
functions->NewSamplerObject = radeonNewSamplerObject;
}

View File

@ -1,90 +0,0 @@
/**************************************************************************
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
VA Linux Systems Inc., Fremont, California.
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/*
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#ifndef __RADEON_TEX_H__
#define __RADEON_TEX_H__
extern void radeonSetTexBuffer(__DRIcontext *pDRICtx, GLint target, __DRIdrawable *dPriv);
extern void radeonSetTexBuffer2(__DRIcontext *pDRICtx, GLint target, GLint glx_texture_format,
__DRIdrawable *dPriv);
extern void radeonUpdateTextureState( struct gl_context *ctx );
extern int radeonUploadTexImages( r100ContextPtr rmesa, radeonTexObjPtr t,
GLuint face );
extern void radeonDestroyTexObj( r100ContextPtr rmesa, radeonTexObjPtr t );
extern void radeonTexUpdateParameters(struct gl_context *ctx, GLuint unit);
extern void radeonInitTextureFuncs( radeonContextPtr radeon, struct dd_function_table *functions );
struct tx_table {
GLuint format, filter;
};
/* XXX verify this table against MESA_FORMAT_x values */
static const struct tx_table tx_table[] =
{
[ MESA_FORMAT_NONE ] = { 0xffffffff, 0 },
[ MESA_FORMAT_A8B8G8R8_UNORM ] = { RADEON_TXFORMAT_RGBA8888 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_R8G8B8A8_UNORM ] = { RADEON_TXFORMAT_RGBA8888 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_B8G8R8A8_UNORM ] = { RADEON_TXFORMAT_ARGB8888 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_A8R8G8B8_UNORM ] = { RADEON_TXFORMAT_ARGB8888 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_B8G8R8X8_UNORM ] = { RADEON_TXFORMAT_ARGB8888, 0 },
[ MESA_FORMAT_X8R8G8B8_UNORM ] = { RADEON_TXFORMAT_ARGB8888, 0 },
[ MESA_FORMAT_BGR_UNORM8 ] = { RADEON_TXFORMAT_ARGB8888, 0 },
[ MESA_FORMAT_B5G6R5_UNORM ] = { RADEON_TXFORMAT_RGB565, 0 },
[ MESA_FORMAT_R5G6B5_UNORM ] = { RADEON_TXFORMAT_RGB565, 0 },
[ MESA_FORMAT_B4G4R4A4_UNORM ] = { RADEON_TXFORMAT_ARGB4444 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_A4R4G4B4_UNORM ] = { RADEON_TXFORMAT_ARGB4444 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_B5G5R5A1_UNORM ] = { RADEON_TXFORMAT_ARGB1555 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_A1R5G5B5_UNORM ] = { RADEON_TXFORMAT_ARGB1555 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_LA_UNORM8 ] = { RADEON_TXFORMAT_AI88 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_A_UNORM8 ] = { RADEON_TXFORMAT_I8 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_L_UNORM8 ] = { RADEON_TXFORMAT_I8, 0 },
[ MESA_FORMAT_I_UNORM8 ] = { RADEON_TXFORMAT_I8 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_YCBCR ] = { RADEON_TXFORMAT_YVYU422, RADEON_YUV_TO_RGB },
[ MESA_FORMAT_YCBCR_REV ] = { RADEON_TXFORMAT_VYUY422, RADEON_YUV_TO_RGB },
[ MESA_FORMAT_RGB_FXT1 ] = { 0xffffffff, 0 },
[ MESA_FORMAT_RGBA_FXT1 ] = { 0xffffffff, 0 },
[ MESA_FORMAT_RGB_DXT1 ] = { RADEON_TXFORMAT_DXT1, 0 },
[ MESA_FORMAT_RGBA_DXT1 ] = { RADEON_TXFORMAT_DXT1 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_RGBA_DXT3 ] = { RADEON_TXFORMAT_DXT23 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
[ MESA_FORMAT_RGBA_DXT5 ] = { RADEON_TXFORMAT_DXT45 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
};
#endif /* __RADEON_TEX_H__ */

View File

@ -1,160 +0,0 @@
/*
* Copyright (C) 2009 Maciej Cencora <m.cencora@gmail.com>
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "radeon_common.h"
#include "radeon_texture.h"
#include "main/enums.h"
#include "main/image.h"
#include "main/teximage.h"
#include "main/texstate.h"
#include "drivers/common/meta.h"
#include "radeon_mipmap_tree.h"
static GLboolean
do_copy_texsubimage(struct gl_context *ctx,
struct radeon_tex_obj *tobj,
radeon_texture_image *timg,
GLint dstx, GLint dsty,
struct radeon_renderbuffer *rrb,
GLint x, GLint y,
GLsizei width, GLsizei height)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
const GLuint face = timg->base.Base.Face;
const GLuint level = timg->base.Base.Level;
unsigned src_bpp;
unsigned dst_bpp;
mesa_format src_mesaformat;
mesa_format dst_mesaformat;
unsigned flip_y;
if (!radeon->vtbl.blit) {
return GL_FALSE;
}
// This is a software renderbuffer; fall back to swrast
if (!rrb) {
return GL_FALSE;
}
if (_mesa_get_format_bits(timg->base.Base.TexFormat, GL_DEPTH_BITS) > 0) {
/* copying depth values */
flip_y = ctx->ReadBuffer->Attachment[BUFFER_DEPTH].Type == GL_NONE;
} else {
/* copying color */
flip_y = ctx->ReadBuffer->Attachment[BUFFER_COLOR0].Type == GL_NONE;
}
if (!timg->mt) {
radeon_validate_texture_miptree(ctx, &tobj->base.Sampler, &tobj->base);
}
assert(rrb->bo);
assert(timg->mt);
assert(timg->mt->bo);
assert(timg->base.Base.Width >= dstx + width);
assert(timg->base.Base.Height >= dsty + height);
intptr_t src_offset = rrb->draw_offset;
intptr_t dst_offset = radeon_miptree_image_offset(timg->mt, face, level);
if (0) {
fprintf(stderr, "%s: copying to face %d, level %d\n",
__func__, face, level);
fprintf(stderr, "to: x %d, y %d, offset %d\n", dstx, dsty, (uint32_t) dst_offset);
fprintf(stderr, "from (%dx%d) width %d, height %d, offset %d, pitch %d\n",
x, y, rrb->base.Base.Width, rrb->base.Base.Height, (uint32_t) src_offset, rrb->pitch/rrb->cpp);
fprintf(stderr, "src size %d, dst size %d\n", rrb->bo->size, timg->mt->bo->size);
}
src_mesaformat = rrb->base.Base.Format;
dst_mesaformat = timg->base.Base.TexFormat;
src_bpp = _mesa_get_format_bytes(src_mesaformat);
dst_bpp = _mesa_get_format_bytes(dst_mesaformat);
if (!radeon->vtbl.check_blit(dst_mesaformat, rrb->pitch / rrb->cpp)) {
/* depth formats tend to be special */
if (_mesa_get_format_bits(dst_mesaformat, GL_DEPTH_BITS) > 0)
return GL_FALSE;
if (src_bpp != dst_bpp)
return GL_FALSE;
switch (dst_bpp) {
case 2:
src_mesaformat = MESA_FORMAT_B5G6R5_UNORM;
dst_mesaformat = MESA_FORMAT_B5G6R5_UNORM;
break;
case 4:
src_mesaformat = MESA_FORMAT_B8G8R8A8_UNORM;
dst_mesaformat = MESA_FORMAT_B8G8R8A8_UNORM;
break;
case 1:
src_mesaformat = MESA_FORMAT_A_UNORM8;
dst_mesaformat = MESA_FORMAT_A_UNORM8;
break;
default:
return GL_FALSE;
}
}
/* blit from src buffer to texture */
return radeon->vtbl.blit(ctx, rrb->bo, src_offset, src_mesaformat, rrb->pitch/rrb->cpp,
rrb->base.Base.Width, rrb->base.Base.Height, x, y,
timg->mt->bo, dst_offset, dst_mesaformat,
timg->mt->levels[level].rowstride / dst_bpp,
timg->base.Base.Width, timg->base.Base.Height,
dstx, dsty, width, height, flip_y);
}
void
radeonCopyTexSubImage(struct gl_context *ctx, GLuint dims,
struct gl_texture_image *texImage,
GLint xoffset, GLint yoffset, GLint slice,
struct gl_renderbuffer *rb,
GLint x, GLint y,
GLsizei width, GLsizei height)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
radeon_prepare_render(radeon);
if (slice != 0 || !do_copy_texsubimage(ctx,
radeon_tex_obj(texImage->TexObject),
(radeon_texture_image *)texImage,
xoffset, yoffset,
radeon_renderbuffer(rb), x, y, width, height)) {
radeon_print(RADEON_FALLBACKS, RADEON_NORMAL,
"Falling back to sw for glCopyTexSubImage2D\n");
_mesa_meta_CopyTexSubImage(ctx, dims, texImage,
xoffset, yoffset, slice,
rb, x, y, width, height);
}
}

File diff suppressed because it is too large

View File

@ -1,691 +0,0 @@
/*
* Copyright (C) 2009 Maciej Cencora.
* Copyright (C) 2008 Nicolai Haehnle.
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "main/glheader.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/mipmap.h"
#include "main/pbo.h"
#include "main/texcompress.h"
#include "main/texstore.h"
#include "main/teximage.h"
#include "main/texobj.h"
#include "drivers/common/meta.h"
#include "util/driconf.h" /* for symbolic values of enum-type options */
#include "radeon_common.h"
#include "radeon_mipmap_tree.h"
static void teximage_assign_miptree(radeonContextPtr rmesa,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
static radeon_mipmap_tree *radeon_miptree_create_for_teximage(radeonContextPtr rmesa,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
GLuint numrows, GLuint rowsize)
{
assert(rowsize <= dststride);
assert(rowsize <= srcstride);
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s dst %p, stride %u, src %p, stride %u, "
"numrows %u, rowsize %u.\n",
__func__, dst, dststride,
src, srcstride,
numrows, rowsize);
if (rowsize == srcstride && rowsize == dststride) {
memcpy(dst, src, numrows*rowsize);
} else {
GLuint i;
for(i = 0; i < numrows; ++i) {
memcpy(dst, src, rowsize);
dst += dststride;
src += srcstride;
}
}
}
/* textures */
/**
* Allocate an empty texture image object.
*/
struct gl_texture_image *radeonNewTextureImage(struct gl_context *ctx)
{
return calloc(1, sizeof(radeon_texture_image));
}
/**
* Delete a texture image object.
*/
static void
radeonDeleteTextureImage(struct gl_context *ctx, struct gl_texture_image *img)
{
/* nothing special (yet) for radeon_texture_image */
_mesa_delete_texture_image(ctx, img);
}
static GLboolean
radeonAllocTextureImageBuffer(struct gl_context *ctx,
struct gl_texture_image *timage)
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
struct gl_texture_object *texobj = timage->TexObject;
ctx->Driver.FreeTextureImageBuffer(ctx, timage);
if (!_swrast_init_texture_image(timage))
return GL_FALSE;
teximage_assign_miptree(rmesa, texobj, timage);
return GL_TRUE;
}
/**
* Free memory associated with this texture image.
*/
void radeonFreeTextureImageBuffer(struct gl_context *ctx, struct gl_texture_image *timage)
{
radeon_texture_image* image = get_radeon_texture_image(timage);
if (image->mt) {
radeon_miptree_unreference(&image->mt);
}
if (image->bo) {
radeon_bo_unref(image->bo);
image->bo = NULL;
}
_swrast_free_texture_image_buffer(ctx, timage);
}
/**
* Map texture memory/buffer into user space.
* Note: the region of interest parameters are ignored here.
* \param map returns start of mapping of region of interest
* \param stride returns row stride in bytes
*/
static void
radeon_map_texture_image(struct gl_context *ctx,
struct gl_texture_image *texImage,
GLuint slice,
GLuint x, GLuint y, GLuint w, GLuint h,
GLbitfield mode,
GLubyte **map,
GLint *stride)
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
radeon_texture_image *image = get_radeon_texture_image(texImage);
radeon_mipmap_tree *mt = image->mt;
GLuint texel_size = _mesa_get_format_bytes(texImage->TexFormat);
GLuint width = texImage->Width;
GLuint height = texImage->Height;
struct radeon_bo *bo = !image->mt ? image->bo : image->mt->bo;
unsigned int bw, bh;
GLboolean write = (mode & GL_MAP_WRITE_BIT) != 0;
_mesa_get_format_block_size(texImage->TexFormat, &bw, &bh);
assert(y % bh == 0);
y /= bh;
texel_size /= bw;
if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
"%s for texture that is "
"queued for GPU processing.\n",
__func__);
radeon_firevertices(rmesa);
}
if (image->bo) {
/* TFP case */
radeon_bo_map(image->bo, write);
*stride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0, texImage->TexObject->Target);
*map = bo->ptr;
} else if (likely(mt)) {
void *base;
radeon_mipmap_level *lvl = &image->mt->levels[texImage->Level];
radeon_bo_map(mt->bo, write);
base = mt->bo->ptr + lvl->faces[image->base.Base.Face].offset;
*stride = lvl->rowstride;
*map = base + (slice * height) * *stride;
} else {
/* texture data is in malloc'd memory */
assert(map);
*stride = _mesa_format_row_stride(texImage->TexFormat, width);
*map = image->base.Buffer + (slice * height) * *stride;
}
*map += y * *stride + x * texel_size;
}
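/* Illustrative worked example (not in the original source): for a DXT1
 * texture _mesa_get_format_block_size() yields bw = bh = 4 and
 * _mesa_get_format_bytes() yields 8 (one 4x4 block), so the code above
 * turns y into block rows (y /= 4) and texel_size into 8 / 4 = 2 bytes;
 * for a block-aligned x the final offset x * 2 == (x / 4) * 8 points at
 * the start of the containing block.
 */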
static void
radeon_unmap_texture_image(struct gl_context *ctx,
struct gl_texture_image *texImage, GLuint slice)
{
radeon_texture_image *image = get_radeon_texture_image(texImage);
if (image->bo)
radeon_bo_unmap(image->bo);
else if (image->mt)
radeon_bo_unmap(image->mt->bo);
}
/* try to find a format which will only need a memcopy */
static mesa_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
GLenum srcFormat,
GLenum srcType, GLboolean fbo)
{
#if defined(RADEON_R100)
/* r100 can only do this */
return _radeon_texformat_argb8888;
#elif defined(RADEON_R200)
const GLuint ui = 1;
const GLubyte littleEndian = *((const GLubyte *)&ui);
/* Unfortunately, regardless the fbo flag, we might still be asked to
* attach a texture to a fbo later, which then won't succeed if we chose
* one which isn't renderable. And unlike more exotic formats, apps aren't
* really prepared for the incomplete framebuffer this results in (they'd
* have to retry with same internalFormat even, just different
* srcFormat/srcType, which can't really be expected anyway).
* Ideally, we'd defer format selection until later (if the texture is
* used as a rt it's likely there's never data uploaded to it before attached
* to a fbo), but this isn't really possible, so for now just always use
* a renderable format.
*/
if (1 || fbo)
return _radeon_texformat_argb8888;
if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
(srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
(srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
(srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
return MESA_FORMAT_A8B8G8R8_UNORM;
} else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
(srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
(srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
(srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
return MESA_FORMAT_R8G8B8A8_UNORM;
} else
return _radeon_texformat_argb8888;
#endif
}
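/* Illustrative sketch (not in the original source): the littleEndian probe
 * above is the classic type-punning test, equivalent to:
 *
 *   static GLboolean host_is_little_endian(void)
 *   {
 *      const GLuint one = 1;
 *      return *((const GLubyte *)&one) == 1;  -- low byte stored first
 *   }
 *
 * which is what lets the r200 path pick a packed format whose in-memory
 * byte order already matches the incoming srcFormat/srcType pair.
 */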
mesa_format radeonChooseTextureFormat_mesa(struct gl_context * ctx,
GLenum target,
GLint internalFormat,
GLenum format,
GLenum type)
{
return radeonChooseTextureFormat(ctx, internalFormat, format,
type, 0);
}
mesa_format radeonChooseTextureFormat(struct gl_context * ctx,
GLint internalFormat,
GLenum format,
GLenum type, GLboolean fbo)
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
const GLboolean do32bpt =
(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
const GLboolean force16bpt =
(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
(void)format;
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s InternalFormat=%s(%d) type=%s format=%s\n",
__func__,
_mesa_enum_to_string(internalFormat), internalFormat,
_mesa_enum_to_string(type), _mesa_enum_to_string(format));
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s do32bpt=%d force16bpt=%d\n",
__func__, do32bpt, force16bpt);
switch (internalFormat) {
case 4:
case GL_RGBA:
case GL_COMPRESSED_RGBA:
switch (type) {
case GL_UNSIGNED_INT_10_10_10_2:
case GL_UNSIGNED_INT_2_10_10_10_REV:
return do32bpt ? _radeon_texformat_argb8888 :
_radeon_texformat_argb1555;
case GL_UNSIGNED_SHORT_4_4_4_4:
case GL_UNSIGNED_SHORT_4_4_4_4_REV:
return _radeon_texformat_argb4444;
case GL_UNSIGNED_SHORT_5_5_5_1:
case GL_UNSIGNED_SHORT_1_5_5_5_REV:
return _radeon_texformat_argb1555;
default:
return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
_radeon_texformat_argb4444;
}
case 3:
case GL_RGB:
case GL_COMPRESSED_RGB:
switch (type) {
case GL_UNSIGNED_SHORT_4_4_4_4:
case GL_UNSIGNED_SHORT_4_4_4_4_REV:
return _radeon_texformat_argb4444;
case GL_UNSIGNED_SHORT_5_5_5_1:
case GL_UNSIGNED_SHORT_1_5_5_5_REV:
return _radeon_texformat_argb1555;
case GL_UNSIGNED_SHORT_5_6_5:
case GL_UNSIGNED_SHORT_5_6_5_REV:
return _radeon_texformat_rgb565;
default:
return do32bpt ? _radeon_texformat_argb8888 :
_radeon_texformat_rgb565;
}
case GL_RGBA8:
case GL_RGB10_A2:
case GL_RGBA12:
case GL_RGBA16:
return !force16bpt ?
radeonChoose8888TexFormat(rmesa, format, type, fbo) :
_radeon_texformat_argb4444;
case GL_RGBA4:
case GL_RGBA2:
return _radeon_texformat_argb4444;
case GL_RGB5_A1:
return _radeon_texformat_argb1555;
case GL_RGB8:
case GL_RGB10:
case GL_RGB12:
case GL_RGB16:
return !force16bpt ? _radeon_texformat_argb8888 :
_radeon_texformat_rgb565;
case GL_RGB5:
case GL_RGB4:
case GL_R3_G3_B2:
return _radeon_texformat_rgb565;
case GL_ALPHA:
case GL_ALPHA4:
case GL_ALPHA8:
case GL_ALPHA12:
case GL_ALPHA16:
case GL_COMPRESSED_ALPHA:
#if defined(RADEON_R200)
/* r200: can't use a8 format since interpreting hw I8 as a8 would result
in wrong rgb values (same as alpha value instead of 0). */
return MESA_FORMAT_LA_UNORM8;
#else
return MESA_FORMAT_A_UNORM8;
#endif
case 1:
case GL_LUMINANCE:
case GL_LUMINANCE4:
case GL_LUMINANCE8:
case GL_LUMINANCE12:
case GL_LUMINANCE16:
case GL_COMPRESSED_LUMINANCE:
return MESA_FORMAT_L_UNORM8;
case 2:
case GL_LUMINANCE_ALPHA:
case GL_LUMINANCE4_ALPHA4:
case GL_LUMINANCE6_ALPHA2:
case GL_LUMINANCE8_ALPHA8:
case GL_LUMINANCE12_ALPHA4:
case GL_LUMINANCE12_ALPHA12:
case GL_LUMINANCE16_ALPHA16:
case GL_COMPRESSED_LUMINANCE_ALPHA:
return MESA_FORMAT_LA_UNORM8;
case GL_INTENSITY:
case GL_INTENSITY4:
case GL_INTENSITY8:
case GL_INTENSITY12:
case GL_INTENSITY16:
case GL_COMPRESSED_INTENSITY:
return MESA_FORMAT_I_UNORM8;
case GL_YCBCR_MESA:
if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
type == GL_UNSIGNED_BYTE)
return MESA_FORMAT_YCBCR;
else
return MESA_FORMAT_YCBCR_REV;
case GL_RGB_S3TC:
case GL_RGB4_S3TC:
case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
return MESA_FORMAT_RGB_DXT1;
case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
return MESA_FORMAT_RGBA_DXT1;
case GL_RGBA_S3TC:
case GL_RGBA4_S3TC:
case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
return MESA_FORMAT_RGBA_DXT3;
case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
return MESA_FORMAT_RGBA_DXT5;
case GL_ALPHA16F_ARB:
return MESA_FORMAT_A_FLOAT16;
case GL_ALPHA32F_ARB:
return MESA_FORMAT_A_FLOAT32;
case GL_LUMINANCE16F_ARB:
return MESA_FORMAT_L_FLOAT16;
case GL_LUMINANCE32F_ARB:
return MESA_FORMAT_L_FLOAT32;
case GL_LUMINANCE_ALPHA16F_ARB:
return MESA_FORMAT_LA_FLOAT16;
case GL_LUMINANCE_ALPHA32F_ARB:
return MESA_FORMAT_LA_FLOAT32;
case GL_INTENSITY16F_ARB:
return MESA_FORMAT_I_FLOAT16;
case GL_INTENSITY32F_ARB:
return MESA_FORMAT_I_FLOAT32;
case GL_RGB16F_ARB:
return MESA_FORMAT_RGBA_FLOAT16;
case GL_RGB32F_ARB:
return MESA_FORMAT_RGBA_FLOAT32;
case GL_RGBA16F_ARB:
return MESA_FORMAT_RGBA_FLOAT16;
case GL_RGBA32F_ARB:
return MESA_FORMAT_RGBA_FLOAT32;
case GL_DEPTH_COMPONENT:
case GL_DEPTH_COMPONENT16:
case GL_DEPTH_COMPONENT24:
case GL_DEPTH_COMPONENT32:
case GL_DEPTH_STENCIL_EXT:
case GL_DEPTH24_STENCIL8_EXT:
return MESA_FORMAT_Z24_UNORM_S8_UINT;
/* EXT_texture_sRGB */
case GL_SRGB:
case GL_SRGB8:
case GL_SRGB_ALPHA:
case GL_SRGB8_ALPHA8:
case GL_COMPRESSED_SRGB:
case GL_COMPRESSED_SRGB_ALPHA:
return MESA_FORMAT_B8G8R8A8_SRGB;
case GL_SLUMINANCE:
case GL_SLUMINANCE8:
case GL_COMPRESSED_SLUMINANCE:
return MESA_FORMAT_L_SRGB8;
case GL_SLUMINANCE_ALPHA:
case GL_SLUMINANCE8_ALPHA8:
case GL_COMPRESSED_SLUMINANCE_ALPHA:
return MESA_FORMAT_LA_SRGB8;
case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
return MESA_FORMAT_SRGB_DXT1;
case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
return MESA_FORMAT_SRGBA_DXT1;
case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
return MESA_FORMAT_SRGBA_DXT3;
case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
return MESA_FORMAT_SRGBA_DXT5;
default:
_mesa_problem(ctx,
"unexpected internalFormat 0x%x in %s",
(int)internalFormat, __func__);
return MESA_FORMAT_NONE;
}
return MESA_FORMAT_NONE; /* never get here */
}
/** Assign a miptree that matches the given image within the current texture object.
*/
static void teximage_assign_miptree(radeonContextPtr rmesa,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage)
{
radeonTexObj *t = radeon_tex_obj(texObj);
radeon_texture_image* image = get_radeon_texture_image(texImage);
/* Try using the current miptree, or create a new one if there isn't any */
if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage)) {
radeon_miptree_unreference(&t->mt);
t->mt = radeon_miptree_create_for_teximage(rmesa,
texObj,
texImage);
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s: texObj %p, texImage %p, "
"texObj miptree doesn't match, allocated new miptree %p\n",
__func__, texObj, texImage, t->mt);
}
/* Miptree allocation may have failed,
* e.g. when no image was specified for the base level */
if (t->mt) {
radeon_miptree_reference(t->mt, &image->mt);
} else
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
"%s Failed to allocate miptree.\n", __func__);
}
unsigned radeonIsFormatRenderable(mesa_format mesa_format)
{
if (mesa_format == _radeon_texformat_argb8888 || mesa_format == _radeon_texformat_rgb565 ||
mesa_format == _radeon_texformat_argb1555 || mesa_format == _radeon_texformat_argb4444)
return 1;
switch (mesa_format)
{
case MESA_FORMAT_Z_UNORM16:
case MESA_FORMAT_Z24_UNORM_S8_UINT:
return 1;
default:
return 0;
}
}
void radeon_image_target_texture_2d(struct gl_context *ctx, GLenum target,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage,
GLeglImageOES image_handle)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
radeonTexObj *t = radeon_tex_obj(texObj);
radeon_texture_image *radeonImage = get_radeon_texture_image(texImage);
__DRIscreen *screen;
__DRIimage *image;
screen = radeon->radeonScreen->driScreen;
image = screen->dri2.image->lookupEGLImage(screen, image_handle,
screen->loaderPrivate);
if (image == NULL)
return;
radeonFreeTextureImageBuffer(ctx, texImage);
texImage->Width = image->width;
texImage->Height = image->height;
texImage->Depth = 1;
texImage->_BaseFormat = GL_RGBA;
texImage->TexFormat = image->format;
radeonImage->base.RowStride = image->pitch;
texImage->InternalFormat = image->internal_format;
if(t->mt)
{
radeon_miptree_unreference(&t->mt);
t->mt = NULL;
}
/* NOTE: The following is *very* ugly and will probably break. But
I don't know how to deal with it without creating a whole new
function like radeon_miptree_from_bo(), so I'm going with the
easy but error-prone way. */
radeon_try_alloc_miptree(radeon, t);
radeon_miptree_reference(t->mt, &radeonImage->mt);
if (t->mt == NULL)
{
radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
"%s Failed to allocate miptree.\n", __func__);
return;
}
/* Particularly ugly: this is guaranteed to break if image->bo is
not of the required size for a miptree. */
radeon_bo_unref(t->mt->bo);
radeon_bo_ref(image->bo);
t->mt->bo = image->bo;
if (!radeon_miptree_matches_image(t->mt, &radeonImage->base.Base))
fprintf(stderr, "miptree doesn't match image\n");
}
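The NOTE above names the missing piece: a radeon_miptree_from_bo() that adopts an imported BO instead of allocating miptree storage and swapping buffers afterwards. A minimal sketch of that idea follows, purely hypothetical — no such helper ever existed in the tree; only mt->bo, radeon_bo_ref() and the radeon_mipmap_tree type come from the surrounding code, and the refcount field is an assumption:
#include <stdlib.h>
/* Hypothetical helper sketched from the NOTE above: wrap an imported BO
 * in a fresh single-reference miptree rather than allocating miptree
 * storage and swapping it out afterwards. */
static radeon_mipmap_tree *radeon_miptree_from_bo(struct radeon_bo *bo)
{
radeon_mipmap_tree *mt = calloc(1, sizeof(*mt));
if (!mt)
return NULL;
radeon_bo_ref(bo);
mt->bo = bo; /* the BO itself is the storage, so no size check to break */
mt->refcount = 1; /* assumed field, by analogy with radeon_miptree_reference() */
return mt;
}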
mesa_format _radeon_texformat_rgba8888 = MESA_FORMAT_NONE;
mesa_format _radeon_texformat_argb8888 = MESA_FORMAT_NONE;
mesa_format _radeon_texformat_rgb565 = MESA_FORMAT_NONE;
mesa_format _radeon_texformat_argb4444 = MESA_FORMAT_NONE;
mesa_format _radeon_texformat_argb1555 = MESA_FORMAT_NONE;
/*@}*/
static void
radeonInitTextureFormats(void)
{
#if UTIL_ARCH_LITTLE_ENDIAN
_radeon_texformat_rgba8888 = MESA_FORMAT_A8B8G8R8_UNORM;
_radeon_texformat_argb8888 = MESA_FORMAT_B8G8R8A8_UNORM;
_radeon_texformat_rgb565 = MESA_FORMAT_B5G6R5_UNORM;
_radeon_texformat_argb4444 = MESA_FORMAT_B4G4R4A4_UNORM;
_radeon_texformat_argb1555 = MESA_FORMAT_B5G5R5A1_UNORM;
#else
_radeon_texformat_rgba8888 = MESA_FORMAT_R8G8B8A8_UNORM;
_radeon_texformat_argb8888 = MESA_FORMAT_A8R8G8B8_UNORM;
_radeon_texformat_rgb565 = MESA_FORMAT_R5G6B5_UNORM;
_radeon_texformat_argb4444 = MESA_FORMAT_A4R4G4B4_UNORM;
_radeon_texformat_argb1555 = MESA_FORMAT_A1R5G5B5_UNORM;
#endif
}
void
radeon_init_common_texture_funcs(radeonContextPtr radeon,
struct dd_function_table *functions)
{
functions->NewTextureImage = radeonNewTextureImage;
functions->DeleteTextureImage = radeonDeleteTextureImage;
functions->AllocTextureImageBuffer = radeonAllocTextureImageBuffer;
functions->FreeTextureImageBuffer = radeonFreeTextureImageBuffer;
functions->MapTextureImage = radeon_map_texture_image;
functions->UnmapTextureImage = radeon_unmap_texture_image;
functions->ChooseTextureFormat = radeonChooseTextureFormat_mesa;
functions->CopyTexSubImage = radeonCopyTexSubImage;
functions->Bitmap = _mesa_meta_Bitmap;
functions->EGLImageTargetTexture2D = radeon_image_target_texture_2d;
radeonInitTextureFormats();
}
static radeon_mipmap_tree *radeon_miptree_create_for_teximage(radeonContextPtr rmesa,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage)
{
radeonTexObj *t = radeon_tex_obj(texObj);
GLuint firstLevel;
GLuint lastLevel;
int width, height, depth;
int i;
width = texImage->Width;
height = texImage->Height;
depth = texImage->Depth;
if (texImage->Level > texObj->Attrib.BaseLevel &&
(width == 1 ||
(texObj->Target != GL_TEXTURE_1D && height == 1) ||
(texObj->Target == GL_TEXTURE_3D && depth == 1))) {
/* For this combination, we're at some lower mipmap level and
* some important dimension is 1. We can't extrapolate up to a
* likely base level width/height/depth for a full mipmap stack
* from this info, so just allocate this one level.
*/
firstLevel = texImage->Level;
lastLevel = texImage->Level;
} else {
if (texImage->Level < texObj->Attrib.BaseLevel)
firstLevel = 0;
else
firstLevel = texObj->Attrib.BaseLevel;
for (i = texImage->Level; i > firstLevel; i--) {
width <<= 1;
if (height != 1)
height <<= 1;
if (depth != 1)
depth <<= 1;
}
if ((texObj->Sampler.Attrib.MinFilter == GL_NEAREST ||
texObj->Sampler.Attrib.MinFilter == GL_LINEAR) &&
texImage->Level == firstLevel) {
lastLevel = firstLevel;
} else {
lastLevel = firstLevel + util_logbase2(MAX2(MAX2(width, height), depth));
}
}
return radeon_miptree_create(rmesa, texObj->Target,
texImage->TexFormat, firstLevel, lastLevel - firstLevel + 1,
width, height, depth,
t->tile_bits);
}
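The level extrapolation above is easy to misread, so here is a self-contained illustration of just that computation (plain integers stand in for the GL texture state, the names are invented for the example, and the depth dimension, which behaves like height, is omitted):
#include <assert.h>
/* Walking from the uploaded level back to the base level doubles each
 * non-degenerate dimension once per step, mirroring the loop above. */
static void extrapolate_base_size(unsigned level, unsigned base_level,
unsigned *width, unsigned *height)
{
unsigned i;
for (i = level; i > base_level; i--) {
*width <<= 1;
if (*height != 1)
*height <<= 1;
}
}
/* A 64x64 upload at level 2 with BaseLevel 0 implies a 256x256 base. */
int main(void)
{
unsigned w = 64, h = 64;
extrapolate_base_size(2, 0, &w, &h);
assert(w == 256 && h == 256);
return 0;
}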

View File

@ -1,82 +0,0 @@
/*
* Copyright (C) 2008 Nicolai Haehnle.
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef RADEON_TEXTURE_H
#define RADEON_TEXTURE_H
#include "main/formats.h"
extern mesa_format _radeon_texformat_rgba8888;
extern mesa_format _radeon_texformat_argb8888;
extern mesa_format _radeon_texformat_rgb565;
extern mesa_format _radeon_texformat_argb4444;
extern mesa_format _radeon_texformat_argb1555;
extern void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
GLuint numrows, GLuint rowsize);
struct gl_texture_image *radeonNewTextureImage(struct gl_context *ctx);
void radeonFreeTextureImageBuffer(struct gl_context *ctx, struct gl_texture_image *timage);
int radeon_validate_texture_miptree(struct gl_context * ctx,
struct gl_sampler_object *samp,
struct gl_texture_object *texObj);
mesa_format radeonChooseTextureFormat_mesa(struct gl_context * ctx,
GLenum target,
GLint internalFormat,
GLenum format,
GLenum type);
mesa_format radeonChooseTextureFormat(struct gl_context * ctx,
GLint internalFormat,
GLenum format,
GLenum type, GLboolean fbo);
void radeonCopyTexSubImage(struct gl_context *ctx, GLuint dims,
struct gl_texture_image *texImage,
GLint xoffset, GLint yoffset, GLint zoffset,
struct gl_renderbuffer *rb,
GLint x, GLint y,
GLsizei width, GLsizei height);
unsigned radeonIsFormatRenderable(mesa_format mesa_format);
void radeon_image_target_texture_2d(struct gl_context *ctx, GLenum target,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage,
GLeglImageOES image_handle);
void
radeon_init_common_texture_funcs(radeonContextPtr radeon,
struct dd_function_table *functions);
#endif

View File

@ -1,513 +0,0 @@
/*
* Copyright (C) 2010 Maciej Cencora <m.cencora@gmail.com>
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "radeon_screen.h"
#include "radeon_tile.h"
#include <stdint.h>
#include <string.h>
#include "main/macros.h"
#include "radeon_debug.h"
#define MICRO_TILE_SIZE 32
static void micro_tile_8_x_4_8bit(const void * const src, unsigned src_pitch,
void * const dst, unsigned dst_pitch,
unsigned width, unsigned height)
{
unsigned row; /* current source row */
unsigned col; /* current source column */
unsigned k; /* number of processed tiles */
const unsigned tile_width = 8, tile_height = 4;
const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
k = 0;
for (row = 0; row < height; row += tile_height)
{
for (col = 0; col < width; col += tile_width, ++k)
{
uint8_t *src2 = (uint8_t *)src + src_pitch * row + col;
uint8_t *dst2 = (uint8_t *)dst + row * dst_pitch +
(k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint8_t);
unsigned j;
for (j = 0; j < MIN2(tile_height, height - row); ++j)
{
unsigned columns = MIN2(tile_width, width - col);
memcpy(dst2, src2, columns * sizeof(uint8_t));
dst2 += tile_width;
src2 += src_pitch;
}
}
}
}
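The micro-tile variants in this file share one invariant: tile_width * tile_height * bytes-per-pixel equals the 32-byte MICRO_TILE_SIZE (the 128-bit case is the lone exception and degenerates to a linear copy). The address arithmetic can also be written in closed form; a small self-contained check against the 8x4 8-bit layout above, with invented names:
#include <assert.h>
/* Closed-form tiled byte offset of pixel (x, y) for the 8x4 8-bit layout,
 * mirroring the dst2 arithmetic in micro_tile_8_x_4_8bit() above.
 * Assumes the pitch is a multiple of the tile width. */
static unsigned tiled_offset_8x4(unsigned x, unsigned y, unsigned dst_pitch)
{
return (y & ~3u) * dst_pitch /* start of the 4-row tile band */
+ (x / 8) * 32 /* 32-byte tiles packed left to right */
+ (y & 3u) * 8 /* row inside the tile */
+ (x & 7u); /* column inside the tile */
}
int main(void)
{
/* Pixel (9, 5), 16-pixel pitch: band 64 + tile 32 + row 8 + col 1. */
assert(tiled_offset_8x4(9, 5, 16) == 105);
return 0;
}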
static void micro_tile_4_x_4_16bit(const void * const src, unsigned src_pitch,
void * const dst, unsigned dst_pitch,
unsigned width, unsigned height)
{
unsigned row; /* current source row */
unsigned col; /* current source column */
unsigned k; /* number of processed tiles */
const unsigned tile_width = 4, tile_height = 4;
const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
k = 0;
for (row = 0; row < height; row += tile_height)
{
for (col = 0; col < width; col += tile_width, ++k)
{
uint16_t *src2 = (uint16_t *)src + src_pitch * row + col;
uint16_t *dst2 = (uint16_t *)dst + row * dst_pitch +
(k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint16_t);
unsigned j;
for (j = 0; j < MIN2(tile_height, height - row); ++j)
{
unsigned columns = MIN2(tile_width, width - col);
memcpy(dst2, src2, columns * sizeof(uint16_t));
dst2 += tile_width;
src2 += src_pitch;
}
}
}
}
static void micro_tile_8_x_2_16bit(const void * const src, unsigned src_pitch,
void * const dst, unsigned dst_pitch,
unsigned width, unsigned height)
{
unsigned row; /* current source row */
unsigned col; /* current source column */
unsigned k; /* number of processed tiles */
const unsigned tile_width = 8, tile_height = 2;
const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
k = 0;
for (row = 0; row < height; row += tile_height)
{
for (col = 0; col < width; col += tile_width, ++k)
{
uint16_t *src2 = (uint16_t *)src + src_pitch * row + col;
uint16_t *dst2 = (uint16_t *)dst + row * dst_pitch +
(k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint16_t);
unsigned j;
for (j = 0; j < MIN2(tile_height, height - row); ++j)
{
unsigned columns = MIN2(tile_width, width - col);
memcpy(dst2, src2, columns * sizeof(uint16_t));
dst2 += tile_width;
src2 += src_pitch;
}
}
}
}
static void micro_tile_4_x_2_32bit(const void * const src, unsigned src_pitch,
void * const dst, unsigned dst_pitch,
unsigned width, unsigned height)
{
unsigned row; /* current source row */
unsigned col; /* current source column */
unsigned k; /* number of processed tiles */
const unsigned tile_width = 4, tile_height = 2;
const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
k = 0;
for (row = 0; row < height; row += tile_height)
{
for (col = 0; col < width; col += tile_width, ++k)
{
uint32_t *src2 = (uint32_t *)src + src_pitch * row + col;
uint32_t *dst2 = (uint32_t *)dst + row * dst_pitch +
(k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint32_t);
unsigned j;
for (j = 0; j < MIN2(tile_height, height - row); ++j)
{
unsigned columns = MIN2(tile_width, width - col);
memcpy(dst2, src2, columns * sizeof(uint32_t));
dst2 += tile_width;
src2 += src_pitch;
}
}
}
}
static void micro_tile_2_x_2_64bit(const void * const src, unsigned src_pitch,
void * const dst, unsigned dst_pitch,
unsigned width, unsigned height)
{
unsigned row; /* current source row */
unsigned col; /* current source column */
unsigned k; /* number of processed tiles */
const unsigned tile_width = 2, tile_height = 2;
const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
k = 0;
for (row = 0; row < height; row += tile_height)
{
for (col = 0; col < width; col += tile_width, ++k)
{
uint64_t *src2 = (uint64_t *)src + src_pitch * row + col;
uint64_t *dst2 = (uint64_t *)dst + row * dst_pitch +
(k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint64_t);
unsigned j;
for (j = 0; j < MIN2(tile_height, height - row); ++j)
{
unsigned columns = MIN2(tile_width, width - col);
memcpy(dst2, src2, columns * sizeof(uint64_t));
dst2 += tile_width;
src2 += src_pitch;
}
}
}
}
static void micro_tile_1_x_1_128bit(const void * src, unsigned src_pitch,
void * dst, unsigned dst_pitch,
unsigned width, unsigned height)
{
unsigned j;
const unsigned elem_size = 16; /* sizeof(uint128_t) */
/* 1x1 tiles degenerate to a plain row-by-row copy. */
for (j = 0; j < height; ++j)
{
memcpy(dst, src, width * elem_size);
dst = (uint8_t *)dst + dst_pitch * elem_size;
src = (const uint8_t *)src + src_pitch * elem_size;
}
}
void tile_image(const void * src, unsigned src_pitch,
void *dst, unsigned dst_pitch,
mesa_format format, unsigned width, unsigned height)
{
assert(src_pitch >= width);
assert(dst_pitch >= width);
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"Software tiling: src_pitch %d, dst_pitch %d, width %d, height %d, bpp %d\n",
src_pitch, dst_pitch, width, height, _mesa_get_format_bytes(format));
switch (_mesa_get_format_bytes(format))
{
case 16:
micro_tile_1_x_1_128bit(src, src_pitch, dst, dst_pitch, width, height);
break;
case 8:
micro_tile_2_x_2_64bit(src, src_pitch, dst, dst_pitch, width, height);
break;
case 4:
micro_tile_4_x_2_32bit(src, src_pitch, dst, dst_pitch, width, height);
break;
case 2:
if (_mesa_get_format_bits(format, GL_DEPTH_BITS))
{
micro_tile_4_x_4_16bit(src, src_pitch, dst, dst_pitch, width, height);
}
else
{
micro_tile_8_x_2_16bit(src, src_pitch, dst, dst_pitch, width, height);
}
break;
case 1:
micro_tile_8_x_4_8bit(src, src_pitch, dst, dst_pitch, width, height);
break;
default:
assert(0);
break;
}
}
static void micro_untile_8_x_4_8bit(const void * const src, unsigned src_pitch,
void * const dst, unsigned dst_pitch,
unsigned width, unsigned height)
{
unsigned row; /* current destination row */
unsigned col; /* current destination column */
unsigned k; /* current tile number */
const unsigned tile_width = 8, tile_height = 4;
const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
assert(src_pitch % tile_width == 0);
k = 0;
for (row = 0; row < height; row += tile_height)
{
for (col = 0; col < width; col += tile_width, ++k)
{
uint8_t *src2 = (uint8_t *)src + row * src_pitch +
(k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint8_t);
uint8_t *dst2 = (uint8_t *)dst + dst_pitch * row + col;
unsigned j;
for (j = 0; j < MIN2(tile_height, height - row); ++j)
{
unsigned columns = MIN2(tile_width, width - col);
memcpy(dst2, src2, columns * sizeof(uint8_t));
dst2 += dst_pitch;
src2 += tile_width;
}
}
}
}
static void micro_untile_8_x_2_16bit(const void * const src, unsigned src_pitch,
void * const dst, unsigned dst_pitch,
unsigned width, unsigned height)
{
unsigned row; /* current destination row */
unsigned col; /* current destination column */
unsigned k; /* current tile number */
const unsigned tile_width = 8, tile_height = 2;
const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
assert(src_pitch % tile_width == 0);
k = 0;
for (row = 0; row < height; row += tile_height)
{
for (col = 0; col < width; col += tile_width, ++k)
{
uint16_t *src2 = (uint16_t *)src + row * src_pitch +
(k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint16_t);
uint16_t *dst2 = (uint16_t *)dst + dst_pitch * row + col;
unsigned j;
for (j = 0; j < MIN2(tile_height, height - row); ++j)
{
unsigned columns = MIN2(tile_width, width - col);
memcpy(dst2, src2, columns * sizeof(uint16_t));
dst2 += dst_pitch;
src2 += tile_width;
}
}
}
}
static void micro_untile_4_x_4_16bit(const void * const src, unsigned src_pitch,
void * const dst, unsigned dst_pitch,
unsigned width, unsigned height)
{
unsigned row; /* current destination row */
unsigned col; /* current destination column */
unsigned k; /* current tile number */
const unsigned tile_width = 4, tile_height = 4;
const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
assert(src_pitch % tile_width == 0);
k = 0;
for (row = 0; row < height; row += tile_height)
{
for (col = 0; col < width; col += tile_width, ++k)
{
uint16_t *src2 = (uint16_t *)src + row * src_pitch +
(k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint16_t);
uint16_t *dst2 = (uint16_t *)dst + dst_pitch * row + col;
unsigned j;
for (j = 0; j < MIN2(tile_height, height - row); ++j)
{
unsigned columns = MIN2(tile_width, width - col);
memcpy(dst2, src2, columns * sizeof(uint16_t));
dst2 += dst_pitch;
src2 += tile_width;
}
}
}
}
static void micro_untile_4_x_2_32bit(const void * const src, unsigned src_pitch,
void * const dst, unsigned dst_pitch,
unsigned width, unsigned height)
{
unsigned row; /* current destination row */
unsigned col; /* current destination column */
unsigned k; /* current tile number */
const unsigned tile_width = 4, tile_height = 2;
const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
assert(src_pitch % tile_width == 0);
k = 0;
for (row = 0; row < height; row += tile_height)
{
for (col = 0; col < width; col += tile_width, ++k)
{
uint32_t *src2 = (uint32_t *)src + row * src_pitch +
(k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint32_t);
uint32_t *dst2 = (uint32_t *)dst + dst_pitch * row + col;
unsigned j;
for (j = 0; j < MIN2(tile_height, height - row); ++j)
{
unsigned columns = MIN2(tile_width, width - col);
memcpy(dst2, src2, columns * sizeof(uint32_t));
dst2 += dst_pitch;
src2 += tile_width;
}
}
}
}
static void micro_untile_2_x_2_64bit(const void * const src, unsigned src_pitch,
void * const dst, unsigned dst_pitch,
unsigned width, unsigned height)
{
unsigned row; /* current destination row */
unsigned col; /* current destination column */
unsigned k; /* current tile number */
const unsigned tile_width = 2, tile_height = 2;
const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
assert(src_pitch % tile_width == 0);
k = 0;
for (row = 0; row < height; row += tile_height)
{
for (col = 0; col < width; col += tile_width, ++k)
{
uint64_t *src2 = (uint64_t *)src + row * src_pitch +
(k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint64_t);
uint64_t *dst2 = (uint64_t *)dst + dst_pitch * row + col;
unsigned j;
for (j = 0; j < MIN2(tile_height, height - row); ++j)
{
unsigned columns = MIN2(tile_width, width - col);
memcpy(dst2, src2, columns * sizeof(uint64_t));
dst2 += dst_pitch;
src2 += tile_width;
}
}
}
}
static void micro_untile_1_x_1_128bit(const void * src, unsigned src_pitch,
void * dst, unsigned dst_pitch,
unsigned width, unsigned height)
{
unsigned j;
const unsigned elem_size = 16; /* sizeof(uint128_t) */
/* 1x1 tiles degenerate to a plain row-by-row copy. */
for (j = 0; j < height; ++j)
{
memcpy(dst, src, width * elem_size);
dst = (uint8_t *)dst + dst_pitch * elem_size;
src = (const uint8_t *)src + src_pitch * elem_size;
}
}
void untile_image(const void * src, unsigned src_pitch,
void *dst, unsigned dst_pitch,
mesa_format format, unsigned width, unsigned height)
{
assert(src_pitch >= width);
assert(dst_pitch >= width);
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"Software untiling: src_pitch %d, dst_pitch %d, width %d, height %d, bpp %d\n",
src_pitch, dst_pitch, width, height, _mesa_get_format_bytes(format));
switch (_mesa_get_format_bytes(format))
{
case 16:
micro_untile_1_x_1_128bit(src, src_pitch, dst, dst_pitch, width, height);
break;
case 8:
micro_untile_2_x_2_64bit(src, src_pitch, dst, dst_pitch, width, height);
break;
case 4:
micro_untile_4_x_2_32bit(src, src_pitch, dst, dst_pitch, width, height);
break;
case 2:
if (_mesa_get_format_bits(format, GL_DEPTH_BITS))
{
micro_untile_4_x_4_16bit(src, src_pitch, dst, dst_pitch, width, height);
}
else
{
micro_untile_8_x_2_16bit(src, src_pitch, dst, dst_pitch, width, height);
}
break;
case 1:
micro_untile_8_x_4_8bit(src, src_pitch, dst, dst_pitch, width, height);
break;
default:
assert(0);
break;
}
}
void get_tile_size(mesa_format format, unsigned *block_width, unsigned *block_height)
{
switch (_mesa_get_format_bytes(format))
{
case 16:
*block_width = 1;
*block_height = 1;
break;
case 8:
*block_width = 2;
*block_height = 2;
break;
case 4:
*block_width = 4;
*block_height = 2;
break;
case 2:
if (_mesa_get_format_bits(format, GL_DEPTH_BITS))
{
*block_width = 4;
*block_height = 4;
}
else
{
*block_width = 8;
*block_height = 2;
}
break;
case 1:
*block_width = 8;
*block_height = 4;
break;
default:
assert(0);
break;
}
}
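To see the whole file in action, a hedged usage sketch: tile and then untile a small BGRA8888 surface and expect the original bytes back. It assumes compilation alongside the functions above (plus <assert.h>, <stdint.h>, <string.h>); pitches are in pixels, matching the loops above:
/* Round-trip of tile_image()/untile_image() on a 16x8 surface.
 * MESA_FORMAT_B8G8R8A8_UNORM is 4 bytes per pixel, so this exercises
 * the 4x2 32-bit path; dimensions are whole multiples of the tile. */
static void tile_roundtrip_example(void)
{
const mesa_format fmt = MESA_FORMAT_B8G8R8A8_UNORM;
const unsigned width = 16, height = 8, pitch = 16; /* pitch in pixels */
uint32_t linear[16 * 8], tiled[16 * 8], back[16 * 8];
unsigned i, bw, bh;
get_tile_size(fmt, &bw, &bh); /* 4x2 for 32-bit formats */
assert(width % bw == 0 && height % bh == 0);
for (i = 0; i < width * height; ++i)
linear[i] = i;
tile_image(linear, pitch, tiled, pitch, fmt, width, height);
untile_image(tiled, pitch, back, pitch, fmt, width, height);
assert(memcmp(linear, back, sizeof(linear)) == 0);
}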

View File

@ -1,38 +0,0 @@
/*
* Copyright (C) 2010 Maciej Cencora <m.cencora@gmail.com>
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef RADEON_TILE_H
#define RADEON_TILE_H
#include "main/formats.h"
void tile_image(const void * src, unsigned src_pitch,
void *dst, unsigned dst_pitch,
mesa_format format, unsigned width, unsigned height);
void untile_image(const void * src, unsigned src_pitch,
void *dst, unsigned dst_pitch,
mesa_format format, unsigned width, unsigned height);
void get_tile_size(mesa_format format, unsigned *block_width, unsigned *block_height);
#endif /* RADEON_TILE_H */

File diff suppressed because it is too large