classic/i915: Remove driver

This driver is only going to be supported in the Amber branch.

Reviewed-by: Emma Anholt <emma@anholt.net>
Acked-by: Jason Ekstrand <jason@jlekstrand.net>
Acked-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Adam Jackson <ajax@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10153>
Commit 0cad451f00 (parent f464871932)
Author: Dylan Baker, 2021-04-09 10:00:44 -07:00; committed by Marge Bot
69 changed files with 5 additions and 23447 deletions

.gitlab-ci.yml

@@ -825,7 +825,7 @@ fedora-release:
       -Wno-error=uninitialized
     CPP_ARGS: >
       -Wno-error=array-bounds
-    DRI_DRIVERS: "i915,i965"
+    DRI_DRIVERS: "i965"
     DRI_LOADERS: >
       -D glx=dri
       -D gbm=enabled
@@ -1118,7 +1118,7 @@ debian-i386:
     CROSS: i386
     VULKAN_DRIVERS: intel,amd,swrast,virtio-experimental
     GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,swrast,virgl,zink,crocus"
-    DRI_DRIVERS: "i915,i965"
+    DRI_DRIVERS: "i965"
     EXTRA_OPTION: >
       -D vulkan-layers=device-select,overlay

meson.build

@@ -178,7 +178,7 @@ if dri_drivers.contains('auto')
   if system_has_kms_drm
     # TODO: PPC, Sparc
     if ['x86', 'x86_64'].contains(host_machine.cpu_family())
-      dri_drivers = ['i915', 'i965']
+      dri_drivers = ['i965']
     elif ['arm', 'aarch64', 'mips', 'mips64'].contains(host_machine.cpu_family())
       dri_drivers = []
     else
@@ -194,7 +194,6 @@
   endif
 endif

-with_dri_i915 = dri_drivers.contains('i915')
 with_dri_i965 = dri_drivers.contains('i965')
 with_dri = dri_drivers.length() != 0
@@ -299,9 +298,6 @@ with_any_intel = with_dri_i965 or with_intel_vk or with_gallium_iris or with_gal
 if with_swrast_vk and not with_gallium_softpipe
   error('swrast vulkan requires gallium swrast')
 endif
-if with_dri_i915 and with_gallium_i915
-  error('Only one i915 provider can be built')
-endif
 if with_gallium_tegra and not with_gallium_nouveau
   error('tegra driver requires nouveau driver')
 endif
@@ -1591,7 +1587,7 @@ _drm_intel_ver = '2.4.75'
 _drm_ver = '2.4.109'

 _libdrm_checks = [
-  ['intel', with_dri_i915 or with_gallium_i915],
+  ['intel', with_gallium_i915],
   ['amdgpu', (with_amd_vk and not with_platform_windows) or with_gallium_radeonsi],
   ['radeon', (with_gallium_radeonsi or with_gallium_r300 or with_gallium_r600)],
   ['nouveau', with_gallium_nouveau],

meson_options.txt

@@ -55,7 +55,7 @@ option(
   'dri-drivers',
   type : 'array',
   value : ['auto'],
-  choices : ['auto', 'i915', 'i965'],
+  choices : ['auto', 'i965'],
   description : 'List of dri drivers to build. If this is set to auto all drivers applicable to the target OS/architecture will be built'
 )
 option(

src/loader/pci_id_driver_map.h

@@ -8,18 +8,6 @@
 # error "Only include from loader.c"
 #endif

-static const int i830_chip_ids[] = {
-#define CHIPSET(chip, desc, name) chip,
-#include "pci_ids/i830_pci_ids.h"
-#undef CHIPSET
-};
-
-static const int i915_chip_ids[] = {
-#define CHIPSET(chip, desc, name) chip,
-#include "pci_ids/i915_pci_ids.h"
-#undef CHIPSET
-};
-
 static const int i965_chip_ids[] = {
 #define CHIPSET(chip, family, family_str, name) chip,
 #include "pci_ids/i965_pci_ids.h"
@@ -65,8 +53,6 @@ static const struct {
    int num_chips_ids;
    bool (*predicate)(int fd);
 } driver_map[] = {
-   { 0x8086, "i830", i830_chip_ids, ARRAY_SIZE(i830_chip_ids) },
-   { 0x8086, "i915", i915_chip_ids, ARRAY_SIZE(i915_chip_ids) },
    { 0x8086, "i965", i965_chip_ids, ARRAY_SIZE(i965_chip_ids) },
    { 0x8086, "crocus", crocus_chip_ids, ARRAY_SIZE(crocus_chip_ids) },
    { 0x8086, "iris", NULL, -1, is_kernel_i915 },

src/mesa/drivers/dri/i915/i830_context.c

@@ -1,135 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "i830_context.h"
#include "main/api_exec.h"
#include "main/extensions.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "tnl/tnl.h"
#include "tnl/t_vertex.h"
#include "tnl/t_context.h"
#include "tnl/t_pipeline.h"
#include "intel_tris.h"
#include "util/ralloc.h"
#include "util/u_memory.h"
/***************************************
* Mesa's Driver Functions
***************************************/
static void
i830InitDriverFunctions(struct dd_function_table *functions)
{
intelInitDriverFunctions(functions);
i830InitStateFuncs(functions);
}
extern const struct tnl_pipeline_stage *intel_pipeline[];
bool
i830CreateContext(int api,
const struct gl_config * mesaVis,
__DRIcontext * driContextPriv,
unsigned major_version,
unsigned minor_version,
uint32_t flags,
unsigned *error,
void *sharedContextPrivate)
{
struct dd_function_table functions;
struct i830_context *i830 = align_calloc(sizeof(struct i830_context), 16);
struct intel_context *intel = &i830->intel;
struct gl_context *ctx = &intel->ctx;
if (!i830) {
*error = __DRI_CTX_ERROR_NO_MEMORY;
return false;
}
i830InitVtbl(i830);
i830InitDriverFunctions(&functions);
if (!intelInitContext(intel, __DRI_API_OPENGL,
major_version, minor_version, flags,
mesaVis, driContextPriv,
sharedContextPrivate, &functions,
error)) {
align_free(i830);
return false;
}
intel_init_texture_formats(ctx);
_math_matrix_ctr(&intel->ViewportMatrix);
/* Initialize swrast, tnl driver tables: */
intelInitTriFuncs(ctx);
/* Install the customized pipeline: */
_tnl_destroy_pipeline(ctx);
_tnl_install_pipeline(ctx, intel_pipeline);
if (intel->no_rast)
FALLBACK(intel, INTEL_FALLBACK_USER, 1);
intel->ctx.Const.MaxTextureUnits = I830_TEX_UNITS;
intel->ctx.Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = I830_TEX_UNITS;
intel->ctx.Const.MaxTextureCoordUnits = I830_TEX_UNITS;
/* Advertise the full hardware capabilities. The new memory
* manager should cope much better with overload situations:
*/
ctx->Const.MaxTextureSize = 2048;
ctx->Const.Max3DTextureLevels = 9;
ctx->Const.MaxCubeTextureLevels = 11;
ctx->Const.MaxTextureRectSize = (1 << 11);
ctx->Const.MaxTextureUnits = I830_TEX_UNITS;
ctx->Const.MaxTextureMaxAnisotropy = 2.0;
ctx->Const.MaxDrawBuffers = 1;
ctx->Const.QueryCounterBits.SamplesPassed = 0;
_tnl_init_vertices(ctx, ctx->Const.MaxArrayLockSize + 12,
18 * sizeof(GLfloat));
intel->verts = TNL_CONTEXT(ctx)->clipspace.vertex_buf;
i830InitState(i830);
_tnl_allow_vertex_fog(ctx, 1);
_tnl_allow_pixel_fog(ctx, 0);
_mesa_override_extensions(ctx);
_mesa_compute_version(ctx);
_mesa_initialize_dispatch_tables(ctx);
_mesa_initialize_vbo_vtxfmt(ctx);
return true;
}
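
A brief, hypothetical caller sketch of the contract above: on failure i830CreateContext() returns false and stores a __DRI_CTX_ERROR_* code through the error pointer. The variable names here are invented, assumed to come from the DRI screen code.

```c
/* Illustration only: consuming i830CreateContext()'s error-out contract. */
unsigned error = __DRI_CTX_ERROR_SUCCESS;

if (!i830CreateContext(api, mesa_vis, dri_context,
                       major, minor, flags, &error, shared_priv)) {
   /* error now holds a code such as __DRI_CTX_ERROR_NO_MEMORY. */
   return error;
}
```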

src/mesa/drivers/dri/i915/i830_context.h

@@ -1,225 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef I830CONTEXT_INC
#define I830CONTEXT_INC
#include "intel_context.h"
#define I830_FALLBACK_TEXTURE 0x1000
#define I830_FALLBACK_COLORMASK 0x2000
#define I830_FALLBACK_STENCIL 0x4000
#define I830_FALLBACK_STIPPLE 0x8000
#define I830_FALLBACK_LOGICOP 0x20000
#define I830_FALLBACK_DRAW_OFFSET 0x200000
#define I830_UPLOAD_CTX 0x1
#define I830_UPLOAD_BUFFERS 0x2
#define I830_UPLOAD_STIPPLE 0x4
#define I830_UPLOAD_INVARIENT 0x8
#define I830_UPLOAD_RASTER_RULES 0x10
#define I830_UPLOAD_TEX(i) (0x0100<<(i))
#define I830_UPLOAD_TEXBLEND(i) (0x1000<<(i))
#define I830_UPLOAD_TEX_ALL (0x0f00)
#define I830_UPLOAD_TEXBLEND_ALL (0xf000)
/* State structure offsets - these will probably disappear.
*/
#define I830_DESTREG_CBUFADDR0 0
#define I830_DESTREG_CBUFADDR1 1
#define I830_DESTREG_DBUFADDR0 2
#define I830_DESTREG_DBUFADDR1 3
#define I830_DESTREG_DV0 4
#define I830_DESTREG_DV1 5
#define I830_DESTREG_SR0 6
#define I830_DESTREG_SR1 7
#define I830_DESTREG_SR2 8
#define I830_DESTREG_SENABLE 9
#define I830_DESTREG_DRAWRECT0 10
#define I830_DESTREG_DRAWRECT1 11
#define I830_DESTREG_DRAWRECT2 12
#define I830_DESTREG_DRAWRECT3 13
#define I830_DESTREG_DRAWRECT4 14
#define I830_DESTREG_DRAWRECT5 15
#define I830_DEST_SETUP_SIZE 16
#define I830_CTXREG_STATE1 0
#define I830_CTXREG_STATE2 1
#define I830_CTXREG_STATE3 2
#define I830_CTXREG_STATE4 3
#define I830_CTXREG_STATE5 4
#define I830_CTXREG_IALPHAB 5
#define I830_CTXREG_STENCILTST 6
#define I830_CTXREG_ENABLES_1 7
#define I830_CTXREG_ENABLES_2 8
#define I830_CTXREG_AA 9
#define I830_CTXREG_FOGCOLOR 10
#define I830_CTXREG_BLENDCOLOR0 11
#define I830_CTXREG_BLENDCOLOR1 12
#define I830_CTXREG_VF 13
#define I830_CTXREG_VF2 14
#define I830_CTXREG_MCSB0 15
#define I830_CTXREG_MCSB1 16
#define I830_CTX_SETUP_SIZE 17
#define I830_STPREG_ST0 0
#define I830_STPREG_ST1 1
#define I830_STP_SETUP_SIZE 2
#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
#define I830_TEXREG_TM0S1 1
#define I830_TEXREG_TM0S2 2
#define I830_TEXREG_TM0S3 3
#define I830_TEXREG_TM0S4 4
#define I830_TEXREG_MCS 5 /* _3DSTATE_MAP_COORD_SETS */
#define I830_TEXREG_CUBE 6 /* _3DSTATE_MAP_CUBE */
#define I830_TEX_SETUP_SIZE 7
#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
enum {
I830_RASTER_RULES,
I830_RASTER_RULES_SIZE
};
struct i830_texture_object
{
struct intel_texture_object intel;
GLuint Setup[I830_TEX_SETUP_SIZE];
};
#define I830_TEX_UNITS 4
struct i830_hw_state
{
GLuint Ctx[I830_CTX_SETUP_SIZE];
GLuint Buffer[I830_DEST_SETUP_SIZE];
GLuint Stipple[I830_STP_SETUP_SIZE];
GLuint RasterRules[I830_RASTER_RULES_SIZE];
GLuint Tex[I830_TEX_UNITS][I830_TEX_SETUP_SIZE];
GLuint TexBlend[I830_TEX_UNITS][I830_TEXBLEND_SIZE];
GLuint TexBlendWordsUsed[I830_TEX_UNITS];
struct intel_region *draw_region;
struct intel_region *depth_region;
/* Regions aren't actually that appropriate here as the memory may
* be from a PBO or FBO. Will have to do this for draw and depth for
* FBO's...
*/
drm_intel_bo *tex_buffer[I830_TEX_UNITS];
GLuint tex_offset[I830_TEX_UNITS];
GLuint emitted; /* I810_UPLOAD_* */
GLuint active;
};
struct i830_context
{
struct intel_context intel;
GLuint lodbias_tm0s3[MAX_TEXTURE_UNITS];
GLbitfield64 last_index_bitset;
struct i830_hw_state state;
};
#define I830_STATECHANGE(i830, flag) \
do { \
INTEL_FIREVERTICES( &i830->intel ); \
i830->state.emitted &= ~flag; \
} while (0)
#define I830_ACTIVESTATE(i830, flag, mode) \
do { \
INTEL_FIREVERTICES( &i830->intel ); \
if (mode) \
i830->state.active |= flag; \
else \
i830->state.active &= ~flag; \
} while (0)
/* i830_vtbl.c
*/
extern void i830InitVtbl(struct i830_context *i830);
extern void
i830_state_draw_region(struct intel_context *intel,
struct i830_hw_state *state,
struct intel_region *color_region,
struct intel_region *depth_region);
/* i830_context.c
*/
extern bool
i830CreateContext(int api,
const struct gl_config * mesaVis,
__DRIcontext * driContextPriv,
unsigned major_version,
unsigned minor_version,
uint32_t flags,
unsigned *error,
void *sharedContextPrivate);
/* i830_tex.c, i830_texstate.c
*/
extern void i830UpdateTextureState(struct intel_context *intel);
extern void i830InitTextureFuncs(struct dd_function_table *functions);
/* i830_texblend.c
*/
extern GLuint i830SetTexEnvCombine(struct i830_context *i830,
const struct gl_tex_env_combine_state
*combine, GLint blendUnit, GLuint texel_op,
GLuint * state, const GLfloat * factor);
extern void i830EmitTextureBlend(struct i830_context *i830);
/* i830_state.c
*/
extern void i830InitStateFuncs(struct dd_function_table *functions);
extern void i830EmitState(struct i830_context *i830);
extern void i830InitState(struct i830_context *i830);
extern void i830_update_provoking_vertex(struct gl_context *ctx);
/*======================================================================
* Inline conversion functions. These are better-typed than the
* macros used previously:
*/
static inline struct i830_context *
i830_context(struct gl_context * ctx)
{
return (struct i830_context *) ctx;
}
#endif
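
The two macros above are the driver's dirty-state tracking: flush any queued vertices, then clear a block's "emitted" bit (or toggle its "active" bit) so the block is re-uploaded before the next draw. A minimal, invented example of the pattern, matching how i830_texblend.c uses it further down; _3DSTATE_STIPPLE and the ST1_* bits come from i830_reg.h.

```c
/* Illustration only (not from the original sources): marking the
 * stipple state block dirty and active after changing it. */
static void
example_set_stipple(struct i830_context *i830, GLuint pattern16)
{
   /* Fire any queued vertices, then drop the 'already emitted' bit so
    * the block is re-sent with the next state upload. */
   I830_STATECHANGE(i830, I830_UPLOAD_STIPPLE);

   i830->state.Stipple[I830_STPREG_ST0] = _3DSTATE_STIPPLE;
   i830->state.Stipple[I830_STPREG_ST1] =
      ST1_ENABLE | (pattern16 & ST1_MASK);

   /* Keep the block in the set of state uploaded before drawing. */
   I830_ACTIVESTATE(i830, I830_UPLOAD_STIPPLE, true);
}
```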

src/mesa/drivers/dri/i915/i830_reg.h

@@ -1,628 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _I830_REG_H_
#define _I830_REG_H_
#include "intel_reg.h"
#define I830_SET_FIELD( var, mask, value ) (var &= ~(mask), var |= value)
#define _3DSTATE_AA_CMD (CMD_3D | (0x06<<24))
#define AA_LINE_ECAAR_WIDTH_ENABLE (1<<16)
#define AA_LINE_ECAAR_WIDTH_0_5 0
#define AA_LINE_ECAAR_WIDTH_1_0 (1<<14)
#define AA_LINE_ECAAR_WIDTH_2_0 (2<<14)
#define AA_LINE_ECAAR_WIDTH_4_0 (3<<14)
#define AA_LINE_REGION_WIDTH_ENABLE (1<<8)
#define AA_LINE_REGION_WIDTH_0_5 0
#define AA_LINE_REGION_WIDTH_1_0 (1<<6)
#define AA_LINE_REGION_WIDTH_2_0 (2<<6)
#define AA_LINE_REGION_WIDTH_4_0 (3<<6)
#define AA_LINE_ENABLE ((1<<1) | 1)
#define AA_LINE_DISABLE (1<<1)
#define _3DSTATE_COLOR_FACTOR_CMD (CMD_3D | (0x1d<<24) | (0x1<<16))
#define _3DSTATE_COLOR_FACTOR_N_CMD(stage) (CMD_3D | (0x1d<<24) | \
((0x90+(stage))<<16))
#define _3DSTATE_CONST_BLEND_COLOR_CMD (CMD_3D | (0x1d<<24) | (0x88<<16))
#define _3DSTATE_DFLT_DIFFUSE_CMD (CMD_3D | (0x1d<<24) | (0x99<<16))
#define _3DSTATE_DFLT_SPEC_CMD (CMD_3D | (0x1d<<24) | (0x9a<<16))
#define _3DSTATE_DFLT_Z_CMD (CMD_3D | (0x1d<<24) | (0x98<<16))
#define _3DSTATE_DST_BUF_VARS_CMD (CMD_3D | (0x1d<<24) | (0x85<<16))
/* Dword 1 */
#define DSTORG_HORT_BIAS(x) ((x)<<20)
#define DSTORG_VERT_BIAS(x) ((x)<<16)
#define COLOR_4_2_2_CHNL_WRT_ALL 0
#define COLOR_4_2_2_CHNL_WRT_Y (1<<12)
#define COLOR_4_2_2_CHNL_WRT_CR (2<<12)
#define COLOR_4_2_2_CHNL_WRT_CB (3<<12)
#define COLOR_4_2_2_CHNL_WRT_CRCB (4<<12)
#define COLR_BUF_8BIT 0
#define COLR_BUF_RGB555 (1<<8)
#define COLR_BUF_RGB565 (2<<8)
#define COLR_BUF_ARGB8888 (3<<8)
#define DEPTH_IS_Z 0
#define DEPTH_IS_W (1<<6)
#define DEPTH_FRMT_16_FIXED 0
#define DEPTH_FRMT_16_FLOAT (1<<2)
#define DEPTH_FRMT_24_FIXED_8_OTHER (2<<2)
#define DEPTH_FRMT_24_FLOAT_8_OTHER (3<<2)
#define VERT_LINE_STRIDE_1 (1<<1)
#define VERT_LINE_STRIDE_0 0
#define VERT_LINE_STRIDE_OFS_1 1
#define VERT_LINE_STRIDE_OFS_0 0
#define _3DSTATE_DRAW_RECT_CMD (CMD_3D|(0x1d<<24)|(0x80<<16)|3)
/* Dword 1 */
#define DRAW_RECT_DIS_DEPTH_OFS (1<<30)
#define DRAW_DITHER_OFS_X(x) ((x)<<26)
#define DRAW_DITHER_OFS_Y(x) ((x)<<24)
/* Dword 2 */
#define DRAW_YMIN(x) ((x)<<16)
#define DRAW_XMIN(x) (x)
/* Dword 3 */
#define DRAW_YMAX(x) ((x)<<16)
#define DRAW_XMAX(x) (x)
/* Dword 4 */
#define DRAW_YORG(x) ((x)<<16)
#define DRAW_XORG(x) (x)
#define _3DSTATE_ENABLES_1_CMD (CMD_3D|(0x3<<24))
#define ENABLE_LOGIC_OP_MASK ((1<<23)|(1<<22))
#define ENABLE_LOGIC_OP ((1<<23)|(1<<22))
#define DISABLE_LOGIC_OP (1<<23)
#define ENABLE_STENCIL_TEST ((1<<21)|(1<<20))
#define DISABLE_STENCIL_TEST (1<<21)
#define ENABLE_DEPTH_BIAS ((1<<11)|(1<<10))
#define DISABLE_DEPTH_BIAS (1<<11)
#define ENABLE_SPEC_ADD_MASK ((1<<9)|(1<<8))
#define ENABLE_SPEC_ADD ((1<<9)|(1<<8))
#define DISABLE_SPEC_ADD (1<<9)
#define ENABLE_DIS_FOG_MASK ((1<<7)|(1<<6))
#define ENABLE_FOG ((1<<7)|(1<<6))
#define DISABLE_FOG (1<<7)
#define ENABLE_DIS_ALPHA_TEST_MASK ((1<<5)|(1<<4))
#define ENABLE_ALPHA_TEST ((1<<5)|(1<<4))
#define DISABLE_ALPHA_TEST (1<<5)
#define ENABLE_DIS_CBLEND_MASK ((1<<3)|(1<<2))
#define ENABLE_COLOR_BLEND ((1<<3)|(1<<2))
#define DISABLE_COLOR_BLEND (1<<3)
#define ENABLE_DIS_DEPTH_TEST_MASK ((1<<1)|1)
#define ENABLE_DEPTH_TEST ((1<<1)|1)
#define DISABLE_DEPTH_TEST (1<<1)
/* _3DSTATE_ENABLES_2, p138 */
#define _3DSTATE_ENABLES_2_CMD (CMD_3D|(0x4<<24))
#define ENABLE_STENCIL_WRITE ((1<<21)|(1<<20))
#define DISABLE_STENCIL_WRITE (1<<21)
#define ENABLE_TEX_CACHE ((1<<17)|(1<<16))
#define DISABLE_TEX_CACHE (1<<17)
#define ENABLE_DITHER ((1<<9)|(1<<8))
#define DISABLE_DITHER (1<<9)
#define ENABLE_COLOR_MASK (1<<10)
#define WRITEMASK_ALPHA (1<<7)
#define WRITEMASK_ALPHA_SHIFT 7
#define WRITEMASK_RED (1<<6)
#define WRITEMASK_RED_SHIFT 6
#define WRITEMASK_GREEN (1<<5)
#define WRITEMASK_GREEN_SHIFT 5
#define WRITEMASK_BLUE (1<<4)
#define WRITEMASK_BLUE_SHIFT 4
#define WRITEMASK_MASK ((1<<4)|(1<<5)|(1<<6)|(1<<7))
#define ENABLE_COLOR_WRITE ((1<<3)|(1<<2))
#define DISABLE_COLOR_WRITE (1<<3)
#define ENABLE_DIS_DEPTH_WRITE_MASK 0x3
#define ENABLE_DEPTH_WRITE ((1<<1)|1)
#define DISABLE_DEPTH_WRITE (1<<1)
/* _3DSTATE_FOG_COLOR, p139 */
#define _3DSTATE_FOG_COLOR_CMD (CMD_3D|(0x15<<24))
#define FOG_COLOR_RED(x) ((x)<<16)
#define FOG_COLOR_GREEN(x) ((x)<<8)
#define FOG_COLOR_BLUE(x) (x)
/* _3DSTATE_FOG_MODE, p140 */
#define _3DSTATE_FOG_MODE_CMD (CMD_3D|(0x1d<<24)|(0x89<<16)|2)
/* Dword 1 */
#define FOGFUNC_ENABLE (1<<31)
#define FOGFUNC_VERTEX 0
#define FOGFUNC_PIXEL_EXP (1<<28)
#define FOGFUNC_PIXEL_EXP2 (2<<28)
#define FOGFUNC_PIXEL_LINEAR (3<<28)
#define FOGSRC_INDEX_Z (1<<27)
#define FOGSRC_INDEX_W ((1<<27)|(1<<25))
#define FOG_LINEAR_CONST (1<<24)
#define FOG_CONST_1(x) ((x)<<4)
#define ENABLE_FOG_DENSITY (1<<23)
/* Dword 2 */
#define FOG_CONST_2(x) (x)
/* Dword 3 */
#define FOG_DENSITY(x) (x)
/* _3DSTATE_INDEPENDENT_ALPHA_BLEND, p142 */
#define _3DSTATE_INDPT_ALPHA_BLEND_CMD (CMD_3D|(0x0b<<24))
#define ENABLE_INDPT_ALPHA_BLEND ((1<<23)|(1<<22))
#define DISABLE_INDPT_ALPHA_BLEND (1<<23)
#define ALPHA_BLENDFUNC_MASK 0x3f0000
#define ENABLE_ALPHA_BLENDFUNC (1<<21)
#define ABLENDFUNC_ADD 0
#define ABLENDFUNC_SUB (1<<16)
#define ABLENDFUNC_RVSE_SUB (2<<16)
#define ABLENDFUNC_MIN (3<<16)
#define ABLENDFUNC_MAX (4<<16)
#define SRC_DST_ABLEND_MASK 0xfff
#define ENABLE_SRC_ABLEND_FACTOR (1<<11)
#define SRC_ABLEND_FACT(x) ((x)<<6)
#define ENABLE_DST_ABLEND_FACTOR (1<<5)
#define DST_ABLEND_FACT(x) (x)
/* _3DSTATE_MAP_BLEND_ARG, p152 */
#define _3DSTATE_MAP_BLEND_ARG_CMD(stage) (CMD_3D|(0x0e<<24)|((stage)<<20))
#define TEXPIPE_COLOR 0
#define TEXPIPE_ALPHA (1<<18)
#define TEXPIPE_KILL (2<<18)
#define TEXBLEND_ARG0 0
#define TEXBLEND_ARG1 (1<<15)
#define TEXBLEND_ARG2 (2<<15)
#define TEXBLEND_ARG3 (3<<15)
#define TEXBLENDARG_MODIFY_PARMS (1<<6)
#define TEXBLENDARG_REPLICATE_ALPHA (1<<5)
#define TEXBLENDARG_INV_ARG (1<<4)
#define TEXBLENDARG_ONE 0
#define TEXBLENDARG_FACTOR 0x01
#define TEXBLENDARG_ACCUM 0x02
#define TEXBLENDARG_DIFFUSE 0x03
#define TEXBLENDARG_SPEC 0x04
#define TEXBLENDARG_CURRENT 0x05
#define TEXBLENDARG_TEXEL0 0x06
#define TEXBLENDARG_TEXEL1 0x07
#define TEXBLENDARG_TEXEL2 0x08
#define TEXBLENDARG_TEXEL3 0x09
#define TEXBLENDARG_FACTOR_N 0x0e
/* _3DSTATE_MAP_BLEND_OP, p155 */
#define _3DSTATE_MAP_BLEND_OP_CMD(stage) (CMD_3D|(0x0d<<24)|((stage)<<20))
#if 0
# define TEXPIPE_COLOR 0
# define TEXPIPE_ALPHA (1<<18)
# define TEXPIPE_KILL (2<<18)
#endif
#define ENABLE_TEXOUTPUT_WRT_SEL (1<<17)
#define TEXOP_OUTPUT_CURRENT 0
#define TEXOP_OUTPUT_ACCUM (1<<15)
#define ENABLE_TEX_CNTRL_STAGE ((1<<12)|(1<<11))
#define DISABLE_TEX_CNTRL_STAGE (1<<12)
#define TEXOP_SCALE_SHIFT 9
#define TEXOP_SCALE_1X (0 << TEXOP_SCALE_SHIFT)
#define TEXOP_SCALE_2X (1 << TEXOP_SCALE_SHIFT)
#define TEXOP_SCALE_4X (2 << TEXOP_SCALE_SHIFT)
#define TEXOP_MODIFY_PARMS (1<<8)
#define TEXOP_LAST_STAGE (1<<7)
#define TEXBLENDOP_KILLPIXEL 0x02
#define TEXBLENDOP_ARG1 0x01
#define TEXBLENDOP_ARG2 0x02
#define TEXBLENDOP_MODULATE 0x03
#define TEXBLENDOP_ADD 0x06
#define TEXBLENDOP_ADDSIGNED 0x07
#define TEXBLENDOP_BLEND 0x08
#define TEXBLENDOP_BLEND_AND_ADD 0x09
#define TEXBLENDOP_SUBTRACT 0x0a
#define TEXBLENDOP_DOT3 0x0b
#define TEXBLENDOP_DOT4 0x0c
#define TEXBLENDOP_MODULATE_AND_ADD 0x0d
#define TEXBLENDOP_MODULATE_2X_AND_ADD 0x0e
#define TEXBLENDOP_MODULATE_4X_AND_ADD 0x0f
/* _3DSTATE_MAP_BUMP_TABLE, p160 TODO */
/* _3DSTATE_MAP_COLOR_CHROMA_KEY, p161 TODO */
#define _3DSTATE_MAP_COORD_TRANSFORM ((3<<29)|(0x1d<<24)|(0x8c<<16))
#define DISABLE_TEX_TRANSFORM (1<<28)
#define TEXTURE_SET(x) ((x)<<29)
#define _3DSTATE_VERTEX_TRANSFORM ((3<<29)|(0x1d<<24)|(0x8b<<16))
#define DISABLE_VIEWPORT_TRANSFORM (1<<31)
#define DISABLE_PERSPECTIVE_DIVIDE (1<<29)
/* _3DSTATE_MAP_COORD_SET_BINDINGS, p162 */
#define _3DSTATE_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16))
#define TEXBIND_MASK3 ((1<<15)|(1<<14)|(1<<13)|(1<<12))
#define TEXBIND_MASK2 ((1<<11)|(1<<10)|(1<<9)|(1<<8))
#define TEXBIND_MASK1 ((1<<7)|(1<<6)|(1<<5)|(1<<4))
#define TEXBIND_MASK0 ((1<<3)|(1<<2)|(1<<1)|1)
#define TEXBIND_SET3(x) ((x)<<12)
#define TEXBIND_SET2(x) ((x)<<8)
#define TEXBIND_SET1(x) ((x)<<4)
#define TEXBIND_SET0(x) (x)
#define TEXCOORDSRC_KEEP 0
#define TEXCOORDSRC_DEFAULT 0x01
#define TEXCOORDSRC_VTXSET_0 0x08
#define TEXCOORDSRC_VTXSET_1 0x09
#define TEXCOORDSRC_VTXSET_2 0x0a
#define TEXCOORDSRC_VTXSET_3 0x0b
#define TEXCOORDSRC_VTXSET_4 0x0c
#define TEXCOORDSRC_VTXSET_5 0x0d
#define TEXCOORDSRC_VTXSET_6 0x0e
#define TEXCOORDSRC_VTXSET_7 0x0f
#define MAP_UNIT(unit) ((unit)<<16)
#define MAP_UNIT_MASK (0x7<<16)
/* _3DSTATE_MAP_COORD_SETS, p164 */
#define _3DSTATE_MAP_COORD_SET_CMD (CMD_3D|(0x1c<<24)|(0x01<<19))
#define ENABLE_TEXCOORD_PARAMS (1<<15)
#define TEXCOORDS_ARE_NORMAL (1<<14)
#define TEXCOORDS_ARE_IN_TEXELUNITS 0
#define TEXCOORDTYPE_CARTESIAN 0
#define TEXCOORDTYPE_HOMOGENEOUS (1<<11)
#define TEXCOORDTYPE_VECTOR (2<<11)
#define TEXCOORDTYPE_MASK (0x7<<11)
#define ENABLE_ADDR_V_CNTL (1<<7)
#define ENABLE_ADDR_U_CNTL (1<<3)
#define TEXCOORD_ADDR_V_MODE(x) ((x)<<4)
#define TEXCOORD_ADDR_U_MODE(x) (x)
#define TEXCOORDMODE_WRAP 0
#define TEXCOORDMODE_MIRROR 1
#define TEXCOORDMODE_CLAMP 2
#define TEXCOORDMODE_WRAP_SHORTEST 3
#define TEXCOORDMODE_CLAMP_BORDER 4
#define TEXCOORD_ADDR_V_MASK 0x70
#define TEXCOORD_ADDR_U_MASK 0x7
/* _3DSTATE_MAP_CUBE, p168 TODO */
#define _3DSTATE_MAP_CUBE (CMD_3D|(0x1c<<24)|(0x0a<<19))
#define CUBE_NEGX_ENABLE (1<<5)
#define CUBE_POSX_ENABLE (1<<4)
#define CUBE_NEGY_ENABLE (1<<3)
#define CUBE_POSY_ENABLE (1<<2)
#define CUBE_NEGZ_ENABLE (1<<1)
#define CUBE_POSZ_ENABLE (1<<0)
/* _3DSTATE_MODES_1, p190 */
#define _3DSTATE_MODES_1_CMD (CMD_3D|(0x08<<24))
#define BLENDFUNC_MASK 0x3f0000
#define ENABLE_COLR_BLND_FUNC (1<<21)
#define BLENDFUNC_ADD 0
#define BLENDFUNC_SUB (1<<16)
#define BLENDFUNC_RVRSE_SUB (2<<16)
#define BLENDFUNC_MIN (3<<16)
#define BLENDFUNC_MAX (4<<16)
#define SRC_DST_BLND_MASK 0xfff
#define ENABLE_SRC_BLND_FACTOR (1<<11)
#define ENABLE_DST_BLND_FACTOR (1<<5)
#define SRC_BLND_FACT(x) ((x)<<6)
#define DST_BLND_FACT(x) (x)
/* _3DSTATE_MODES_2, p192 */
#define _3DSTATE_MODES_2_CMD (CMD_3D|(0x0f<<24))
#define ENABLE_GLOBAL_DEPTH_BIAS (1<<22)
#define GLOBAL_DEPTH_BIAS(x) ((x)<<14)
#define ENABLE_ALPHA_TEST_FUNC (1<<13)
#define ENABLE_ALPHA_REF_VALUE (1<<8)
#define ALPHA_TEST_FUNC(x) ((x)<<9)
#define ALPHA_REF_VALUE(x) (x)
#define ALPHA_TEST_REF_MASK 0x3fff
/* _3DSTATE_MODES_3, p193 */
#define _3DSTATE_MODES_3_CMD (CMD_3D|(0x02<<24))
#define DEPTH_TEST_FUNC_MASK 0x1f0000
#define ENABLE_DEPTH_TEST_FUNC (1<<20)
/* Uses COMPAREFUNC */
#define DEPTH_TEST_FUNC(x) ((x)<<16)
#define ENABLE_ALPHA_SHADE_MODE (1<<11)
#define ENABLE_FOG_SHADE_MODE (1<<9)
#define ENABLE_SPEC_SHADE_MODE (1<<7)
#define ENABLE_COLOR_SHADE_MODE (1<<5)
#define ALPHA_SHADE_MODE(x) ((x)<<10)
#define FOG_SHADE_MODE(x) ((x)<<8)
#define SPEC_SHADE_MODE(x) ((x)<<6)
#define COLOR_SHADE_MODE(x) ((x)<<4)
#define CULLMODE_MASK 0xf
#define ENABLE_CULL_MODE (1<<3)
#define CULLMODE_BOTH 0
#define CULLMODE_NONE 1
#define CULLMODE_CW 2
#define CULLMODE_CCW 3
#define SHADE_MODE_LINEAR 0
#define SHADE_MODE_FLAT 0x1
/* _3DSTATE_MODES_4, p195 */
#define _3DSTATE_MODES_4_CMD (CMD_3D|(0x16<<24))
#define ENABLE_LOGIC_OP_FUNC (1<<23)
#define LOGIC_OP_FUNC(x) ((x)<<18)
#define LOGICOP_MASK ((1<<18)|(1<<19)|(1<<20)|(1<<21))
#define LOGICOP_CLEAR 0
#define LOGICOP_NOR 0x1
#define LOGICOP_AND_INV 0x2
#define LOGICOP_COPY_INV 0x3
#define LOGICOP_AND_RVRSE 0x4
#define LOGICOP_INV 0x5
#define LOGICOP_XOR 0x6
#define LOGICOP_NAND 0x7
#define LOGICOP_AND 0x8
#define LOGICOP_EQUIV 0x9
#define LOGICOP_NOOP 0xa
#define LOGICOP_OR_INV 0xb
#define LOGICOP_COPY 0xc
#define LOGICOP_OR_RVRSE 0xd
#define LOGICOP_OR 0xe
#define LOGICOP_SET 0xf
#define MODE4_ENABLE_STENCIL_TEST_MASK ((1<<17)|(0xff00))
#define ENABLE_STENCIL_TEST_MASK (1<<17)
#define STENCIL_TEST_MASK(x) (((x)&0xff)<<8)
#define MODE4_ENABLE_STENCIL_WRITE_MASK ((1<<16)|(0x00ff))
#define ENABLE_STENCIL_WRITE_MASK (1<<16)
#define STENCIL_WRITE_MASK(x) ((x)&0xff)
/* _3DSTATE_MODES_5, p196 */
#define _3DSTATE_MODES_5_CMD (CMD_3D|(0x0c<<24))
#define ENABLE_SPRITE_POINT_TEX (1<<23)
#define SPRITE_POINT_TEX_ON (1<<22)
#define SPRITE_POINT_TEX_OFF 0
#define FLUSH_RENDER_CACHE (1<<18)
#define FLUSH_TEXTURE_CACHE (1<<16)
#define FIXED_LINE_WIDTH_MASK 0xfc00
#define ENABLE_FIXED_LINE_WIDTH (1<<15)
#define FIXED_LINE_WIDTH(x) ((x)<<10)
#define FIXED_POINT_WIDTH_MASK 0x3ff
#define ENABLE_FIXED_POINT_WIDTH (1<<9)
#define FIXED_POINT_WIDTH(x) (x)
/* _3DSTATE_RASTERIZATION_RULES, p198 */
#define _3DSTATE_RASTER_RULES_CMD (CMD_3D|(0x07<<24))
#define ENABLE_POINT_RASTER_RULE (1<<15)
#define OGL_POINT_RASTER_RULE (1<<13)
#define ENABLE_LINE_STRIP_PROVOKE_VRTX (1<<8)
#define ENABLE_TRI_FAN_PROVOKE_VRTX (1<<5)
#define ENABLE_TRI_STRIP_PROVOKE_VRTX (1<<2)
#define LINE_STRIP_PROVOKE_VRTX_MASK (3<<6)
#define LINE_STRIP_PROVOKE_VRTX(x) ((x)<<6)
#define TRI_FAN_PROVOKE_VRTX_MASK (3<<3)
#define TRI_FAN_PROVOKE_VRTX(x) ((x)<<3)
#define TRI_STRIP_PROVOKE_VRTX_MASK (3<<0)
#define TRI_STRIP_PROVOKE_VRTX(x) (x)
/* _3DSTATE_SCISSOR_ENABLE, p200 */
#define _3DSTATE_SCISSOR_ENABLE_CMD (CMD_3D|(0x1c<<24)|(0x10<<19))
#define ENABLE_SCISSOR_RECT ((1<<1) | 1)
#define DISABLE_SCISSOR_RECT (1<<1)
/* _3DSTATE_SCISSOR_RECTANGLE_0, p201 */
#define _3DSTATE_SCISSOR_RECT_0_CMD (CMD_3D|(0x1d<<24)|(0x81<<16)|1)
/* Dword 1 */
#define SCISSOR_RECT_0_YMIN(x) ((x)<<16)
#define SCISSOR_RECT_0_XMIN(x) (x)
/* Dword 2 */
#define SCISSOR_RECT_0_YMAX(x) ((x)<<16)
#define SCISSOR_RECT_0_XMAX(x) (x)
/* _3DSTATE_STENCIL_TEST, p202 */
#define _3DSTATE_STENCIL_TEST_CMD (CMD_3D|(0x09<<24))
#define ENABLE_STENCIL_PARMS (1<<23)
#define STENCIL_OPS_MASK (0xffc000)
#define STENCIL_FAIL_OP(x) ((x)<<20)
#define STENCIL_PASS_DEPTH_FAIL_OP(x) ((x)<<17)
#define STENCIL_PASS_DEPTH_PASS_OP(x) ((x)<<14)
#define ENABLE_STENCIL_TEST_FUNC_MASK ((1<<13)|(1<<12)|(1<<11)|(1<<10)|(1<<9))
#define ENABLE_STENCIL_TEST_FUNC (1<<13)
/* Uses COMPAREFUNC */
#define STENCIL_TEST_FUNC(x) ((x)<<9)
#define STENCIL_REF_VALUE_MASK ((1<<8)|0xff)
#define ENABLE_STENCIL_REF_VALUE (1<<8)
#define STENCIL_REF_VALUE(x) (x)
/* _3DSTATE_VERTEX_FORMAT, p204 */
#define _3DSTATE_VFT0_CMD (CMD_3D|(0x05<<24))
#define VFT0_POINT_WIDTH (1<<12)
#define VFT0_TEX_COUNT_MASK (7<<8)
#define VFT0_TEX_COUNT_SHIFT 8
#define VFT0_TEX_COUNT(x) ((x)<<8)
#define VFT0_SPEC (1<<7)
#define VFT0_DIFFUSE (1<<6)
#define VFT0_DEPTH_OFFSET (1<<5)
#define VFT0_XYZ (1<<1)
#define VFT0_XYZW (2<<1)
#define VFT0_XY (3<<1)
#define VFT0_XYW (4<<1)
#define VFT0_XYZW_MASK (7<<1)
/* _3DSTATE_VERTEX_FORMAT_2, p206 */
#define _3DSTATE_VFT1_CMD (CMD_3D|(0x0a<<24))
#define VFT1_TEX7_FMT(x) ((x)<<14)
#define VFT1_TEX6_FMT(x) ((x)<<12)
#define VFT1_TEX5_FMT(x) ((x)<<10)
#define VFT1_TEX4_FMT(x) ((x)<<8)
#define VFT1_TEX3_FMT(x) ((x)<<6)
#define VFT1_TEX2_FMT(x) ((x)<<4)
#define VFT1_TEX1_FMT(x) ((x)<<2)
#define VFT1_TEX0_FMT(x) (x)
#define VFT1_TEX0_MASK 3
#define VFT1_TEX1_SHIFT 2
/*New stuff picked up along the way */
#define MLC_LOD_BIAS_MASK ((1<<7)-1)
/* _3DSTATE_VERTEX_TRANSFORM, p207 */
#define _3DSTATE_VERTEX_TRANS_CMD (CMD_3D|(0x1d<<24)|(0x8b<<16)|0)
#define _3DSTATE_VERTEX_TRANS_MTX_CMD (CMD_3D|(0x1d<<24)|(0x8b<<16)|6)
/* Dword 1 */
#define ENABLE_VIEWPORT_TRANSFORM ((1<<31)|(1<<30))
#define DISABLE_VIEWPORT_TRANSFORM (1<<31)
#define ENABLE_PERSP_DIVIDE ((1<<29)|(1<<28))
#define DISABLE_PERSP_DIVIDE (1<<29)
#define VRTX_TRANS_LOAD_MATRICES 0x7421
#define VRTX_TRANS_NO_LOAD_MATRICES 0x0000
/* Dword 2 -> 7 are matrix elements */
/* _3DSTATE_W_STATE, p209 */
#define _3DSTATE_W_STATE_CMD (CMD_3D|(0x1d<<24)|(0x8d<<16)|1)
/* Dword 1 */
#define MAGIC_W_STATE_DWORD1 0x00000008
/* Dword 2 */
#define WFAR_VALUE(x) (x)
/* Stipple command, carried over from the i810, apparently:
*/
#define _3DSTATE_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define ST1_ENABLE (1<<16)
#define ST1_MASK (0xffff)
#define _3DSTATE_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16))
#define LOAD_TEXTURE_MAP0 (1<<11)
#define LOAD_GLOBAL_COLOR_FACTOR (1<<6)
#define TM0S0_ADDRESS_MASK 0xfffffffc
#define TM0S0_USE_FENCE (1<<1)
#define TM0S1_HEIGHT_SHIFT 21
#define TM0S1_WIDTH_SHIFT 10
#define TM0S1_PALETTE_SELECT (1<<9)
#define TM0S1_MAPSURF_FORMAT_MASK (0x7 << 6)
#define TM0S1_MAPSURF_FORMAT_SHIFT 6
#define MAPSURF_8BIT_INDEXED (0<<6)
#define MAPSURF_8BIT (1<<6)
#define MAPSURF_16BIT (2<<6)
#define MAPSURF_32BIT (3<<6)
#define MAPSURF_411 (4<<6)
#define MAPSURF_422 (5<<6)
#define MAPSURF_COMPRESSED (6<<6)
#define MAPSURF_4BIT_INDEXED (7<<6)
#define TM0S1_MT_FORMAT_MASK (0x7 << 3)
#define TM0S1_MT_FORMAT_SHIFT 3
#define MT_4BIT_IDX_ARGB8888 (7<<3) /* SURFACE_4BIT_INDEXED */
#define MT_8BIT_IDX_RGB565 (0<<3) /* SURFACE_8BIT_INDEXED */
#define MT_8BIT_IDX_ARGB1555 (1<<3)
#define MT_8BIT_IDX_ARGB4444 (2<<3)
#define MT_8BIT_IDX_AY88 (3<<3)
#define MT_8BIT_IDX_ABGR8888 (4<<3)
#define MT_8BIT_IDX_BUMP_88DVDU (5<<3)
#define MT_8BIT_IDX_BUMP_655LDVDU (6<<3)
#define MT_8BIT_IDX_ARGB8888 (7<<3)
#define MT_8BIT_I8 (0<<3) /* SURFACE_8BIT */
#define MT_8BIT_L8 (1<<3)
#define MT_16BIT_RGB565 (0<<3) /* SURFACE_16BIT */
#define MT_16BIT_ARGB1555 (1<<3)
#define MT_16BIT_ARGB4444 (2<<3)
#define MT_16BIT_AY88 (3<<3)
#define MT_16BIT_DIB_ARGB1555_8888 (4<<3)
#define MT_16BIT_BUMP_88DVDU (5<<3)
#define MT_16BIT_BUMP_655LDVDU (6<<3)
#define MT_16BIT_DIB_RGB565_8888 (7<<3)
#define MT_32BIT_ARGB8888 (0<<3) /* SURFACE_32BIT */
#define MT_32BIT_ABGR8888 (1<<3)
#define MT_32BIT_XRGB8888 (2<<3) /* XXX: Guess from i915_reg.h */
#define MT_32BIT_BUMP_XLDVDU_8888 (6<<3)
#define MT_32BIT_DIB_8888 (7<<3)
#define MT_411_YUV411 (0<<3) /* SURFACE_411 */
#define MT_422_YCRCB_SWAPY (0<<3) /* SURFACE_422 */
#define MT_422_YCRCB_NORMAL (1<<3)
#define MT_422_YCRCB_SWAPUV (2<<3)
#define MT_422_YCRCB_SWAPUVY (3<<3)
#define MT_COMPRESS_DXT1 (0<<3) /* SURFACE_COMPRESSED */
#define MT_COMPRESS_DXT2_3 (1<<3)
#define MT_COMPRESS_DXT4_5 (2<<3)
#define MT_COMPRESS_FXT1 (3<<3)
#define TM0S1_COLORSPACE_CONVERSION (1 << 2)
#define TM0S1_TILED_SURFACE (1 << 1)
#define TM0S1_TILE_WALK (1 << 0)
#define TM0S2_PITCH_SHIFT 21
#define TM0S2_CUBE_FACE_ENA_SHIFT 15
#define TM0S2_CUBE_FACE_ENA_MASK (1<<15)
#define TM0S2_MAP_FORMAT (1<<14)
#define TM0S2_VERTICAL_LINE_STRIDE (1<<13)
#define TM0S2_VERITCAL_LINE_STRIDE_OFF (1<<12)
#define TM0S2_OUTPUT_CHAN_SHIFT 10
#define TM0S2_OUTPUT_CHAN_MASK (3<<10)
#define TM0S2_BASE_MIP_LEVEL_SHIFT 1
#define TM0S2_LOD_PRECLAMP (1 << 0)
#define TM0S3_MIP_FILTER_MASK (0x3<<30)
#define TM0S3_MIP_FILTER_SHIFT 30
#define MIPFILTER_NONE 0
#define MIPFILTER_NEAREST 1
#define MIPFILTER_LINEAR 3
#define TM0S3_MAG_FILTER_MASK (0x3<<28)
#define TM0S3_MAG_FILTER_SHIFT 28
#define TM0S3_MIN_FILTER_MASK (0x3<<26)
#define TM0S3_MIN_FILTER_SHIFT 26
#define FILTER_NEAREST 0
#define FILTER_LINEAR 1
#define FILTER_ANISOTROPIC 2
#define TM0S3_LOD_BIAS_SHIFT 17
#define TM0S3_LOD_BIAS_MASK (0x1ff<<17)
#define TM0S3_MAX_MIP_SHIFT 9
#define TM0S3_MAX_MIP_MASK (0xff<<9)
#define TM0S3_MIN_MIP_SHIFT 3
#define TM0S3_MIN_MIP_MASK (0x3f<<3)
#define TM0S3_MIN_MIP_SHIFT_830 5
#define TM0S3_MIN_MIP_MASK_830 (0x3f<<5)
#define TM0S3_KILL_PIXEL (1<<2)
#define TM0S3_KEYED_FILTER (1<<1)
#define TM0S3_CHROMA_KEY (1<<0)
/* _3DSTATE_MAP_TEXEL_STREAM, p188 */
#define _3DSTATE_MAP_TEX_STREAM_CMD (CMD_3D|(0x1c<<24)|(0x05<<19))
#define DISABLE_TEX_STREAM_BUMP (1<<12)
#define ENABLE_TEX_STREAM_BUMP ((1<<12)|(1<<11))
#define TEX_MODIFY_UNIT_0 0
#define TEX_MODIFY_UNIT_1 (1<<8)
#define ENABLE_TEX_STREAM_COORD_SET (1<<7)
#define TEX_STREAM_COORD_SET(x) ((x)<<4)
#define ENABLE_TEX_STREAM_MAP_IDX (1<<3)
#define TEX_STREAM_MAP_IDX(x) (x)
#endif
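
Almost everything in this header is a field within a 32-bit hardware state word. A small invented example of how the pieces compose, using only defines from above; I830_SET_FIELD is the read-modify-write helper from the top of the file.

```c
/* Illustration only: build a _3DSTATE_MODES_3 dword, then rewrite one
 * field in place without disturbing the others. */
GLuint modes3 = _3DSTATE_MODES_3_CMD |
                ENABLE_CULL_MODE | CULLMODE_CW |
                ENABLE_COLOR_SHADE_MODE | COLOR_SHADE_MODE(SHADE_MODE_FLAT);

/* Flip the cull direction later: clear the 4-bit cull field, set it anew. */
I830_SET_FIELD(modes3, CULLMODE_MASK, ENABLE_CULL_MODE | CULLMODE_CCW);
```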

[File diff suppressed because it is too large]

src/mesa/drivers/dri/i915/i830_texblend.c

@@ -1,455 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/glheader.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/enums.h"
#include "intel_screen.h"
#include "intel_tex.h"
#include "i830_context.h"
#include "i830_reg.h"
/* ================================================================
* Texture combine functions
*/
static GLuint
pass_through(GLuint * state, GLuint blendUnit)
{
state[0] = (_3DSTATE_MAP_BLEND_OP_CMD(blendUnit) |
TEXPIPE_COLOR |
ENABLE_TEXOUTPUT_WRT_SEL |
TEXOP_OUTPUT_CURRENT |
DISABLE_TEX_CNTRL_STAGE |
TEXOP_SCALE_1X | TEXOP_MODIFY_PARMS | TEXBLENDOP_ARG1);
state[1] = (_3DSTATE_MAP_BLEND_OP_CMD(blendUnit) |
TEXPIPE_ALPHA |
ENABLE_TEXOUTPUT_WRT_SEL |
TEXOP_OUTPUT_CURRENT |
TEXOP_SCALE_1X | TEXOP_MODIFY_PARMS | TEXBLENDOP_ARG1);
state[2] = (_3DSTATE_MAP_BLEND_ARG_CMD(blendUnit) |
TEXPIPE_COLOR |
TEXBLEND_ARG1 |
TEXBLENDARG_MODIFY_PARMS | TEXBLENDARG_CURRENT);
state[3] = (_3DSTATE_MAP_BLEND_ARG_CMD(blendUnit) |
TEXPIPE_ALPHA |
TEXBLEND_ARG1 |
TEXBLENDARG_MODIFY_PARMS | TEXBLENDARG_CURRENT);
return 4;
}
static GLuint
emit_factor(GLuint blendUnit, GLuint * state, GLuint count,
const GLfloat * factor)
{
GLubyte r, g, b, a;
GLuint col;
if (0)
fprintf(stderr, "emit constant %d: %.2f %.2f %.2f %.2f\n",
blendUnit, factor[0], factor[1], factor[2], factor[3]);
UNCLAMPED_FLOAT_TO_UBYTE(r, factor[0]);
UNCLAMPED_FLOAT_TO_UBYTE(g, factor[1]);
UNCLAMPED_FLOAT_TO_UBYTE(b, factor[2]);
UNCLAMPED_FLOAT_TO_UBYTE(a, factor[3]);
col = ((a << 24) | (r << 16) | (g << 8) | b);
state[count++] = _3DSTATE_COLOR_FACTOR_N_CMD(blendUnit);
state[count++] = col;
return count;
}
static inline GLuint
GetTexelOp(GLint unit)
{
switch (unit) {
case 0:
return TEXBLENDARG_TEXEL0;
case 1:
return TEXBLENDARG_TEXEL1;
case 2:
return TEXBLENDARG_TEXEL2;
case 3:
return TEXBLENDARG_TEXEL3;
default:
return TEXBLENDARG_TEXEL0;
}
}
/**
* Calculate the hardware instructions to set up the current texture environment
* settings. Since \c gl_texture_unit::_CurrentCombine is used, both
* "classic" texture environments and GL_ARB_texture_env_combine type texture
* environments are treated identically.
*
* \todo
* This function should return \c bool. When \c false is returned,
* it means that an environment is selected that the hardware cannot do. This
* is the way the Radeon and R200 drivers work.
*
* \todo
* Looking at i830_3d_regs.h, it seems the i830 can do part of
* GL_ATI_texture_env_combine3. It can handle using \c GL_ONE and
* \c GL_ZERO as combine inputs (which the code already supports). It can
* also handle the \c GL_MODULATE_ADD_ATI mode. Is it worth investigating
* partial support for the extension?
*/
GLuint
i830SetTexEnvCombine(struct i830_context * i830,
const struct gl_tex_env_combine_state * combine,
GLint blendUnit,
GLuint texel_op, GLuint * state, const GLfloat * factor)
{
const GLuint numColorArgs = combine->_NumArgsRGB;
GLuint numAlphaArgs = combine->_NumArgsA;
GLuint blendop;
GLuint ablendop;
GLuint args_RGB[3];
GLuint args_A[3];
GLuint rgb_shift;
GLuint alpha_shift;
bool need_factor = 0;
int i;
unsigned used;
static const GLuint tex_blend_rgb[3] = {
TEXPIPE_COLOR | TEXBLEND_ARG1 | TEXBLENDARG_MODIFY_PARMS,
TEXPIPE_COLOR | TEXBLEND_ARG2 | TEXBLENDARG_MODIFY_PARMS,
TEXPIPE_COLOR | TEXBLEND_ARG0 | TEXBLENDARG_MODIFY_PARMS,
};
static const GLuint tex_blend_a[3] = {
TEXPIPE_ALPHA | TEXBLEND_ARG1 | TEXBLENDARG_MODIFY_PARMS,
TEXPIPE_ALPHA | TEXBLEND_ARG2 | TEXBLENDARG_MODIFY_PARMS,
TEXPIPE_ALPHA | TEXBLEND_ARG0 | TEXBLENDARG_MODIFY_PARMS,
};
if (INTEL_DEBUG & DEBUG_TEXTURE)
fprintf(stderr, "%s\n", __func__);
/* The EXT version of the DOT3 extension does not support the
* scale factor, but the ARB version (and the version in OpenGL
* 1.3) does.
*/
switch (combine->ModeRGB) {
case GL_DOT3_RGB_EXT:
alpha_shift = combine->ScaleShiftA;
rgb_shift = 0;
break;
case GL_DOT3_RGBA_EXT:
alpha_shift = 0;
rgb_shift = 0;
break;
default:
rgb_shift = combine->ScaleShiftRGB;
alpha_shift = combine->ScaleShiftA;
break;
}
switch (combine->ModeRGB) {
case GL_REPLACE:
blendop = TEXBLENDOP_ARG1;
break;
case GL_MODULATE:
blendop = TEXBLENDOP_MODULATE;
break;
case GL_ADD:
blendop = TEXBLENDOP_ADD;
break;
case GL_ADD_SIGNED:
blendop = TEXBLENDOP_ADDSIGNED;
break;
case GL_INTERPOLATE:
blendop = TEXBLENDOP_BLEND;
break;
case GL_SUBTRACT:
blendop = TEXBLENDOP_SUBTRACT;
break;
case GL_DOT3_RGB_EXT:
case GL_DOT3_RGB:
blendop = TEXBLENDOP_DOT3;
break;
case GL_DOT3_RGBA_EXT:
case GL_DOT3_RGBA:
blendop = TEXBLENDOP_DOT4;
break;
default:
return pass_through(state, blendUnit);
}
blendop |= (rgb_shift << TEXOP_SCALE_SHIFT);
/* Handle RGB args */
for (i = 0; i < 3; i++) {
switch (combine->SourceRGB[i]) {
case GL_TEXTURE:
args_RGB[i] = texel_op;
break;
case GL_TEXTURE0:
case GL_TEXTURE1:
case GL_TEXTURE2:
case GL_TEXTURE3:
args_RGB[i] = GetTexelOp(combine->SourceRGB[i] - GL_TEXTURE0);
break;
case GL_CONSTANT:
args_RGB[i] = TEXBLENDARG_FACTOR_N;
need_factor = 1;
break;
case GL_PRIMARY_COLOR:
args_RGB[i] = TEXBLENDARG_DIFFUSE;
break;
case GL_PREVIOUS:
args_RGB[i] = TEXBLENDARG_CURRENT;
break;
default:
return pass_through(state, blendUnit);
}
switch (combine->OperandRGB[i]) {
case GL_SRC_COLOR:
args_RGB[i] |= 0;
break;
case GL_ONE_MINUS_SRC_COLOR:
args_RGB[i] |= TEXBLENDARG_INV_ARG;
break;
case GL_SRC_ALPHA:
args_RGB[i] |= TEXBLENDARG_REPLICATE_ALPHA;
break;
case GL_ONE_MINUS_SRC_ALPHA:
args_RGB[i] |= (TEXBLENDARG_REPLICATE_ALPHA | TEXBLENDARG_INV_ARG);
break;
default:
return pass_through(state, blendUnit);
}
}
/* Need to knobble the alpha calculations of TEXBLENDOP_DOT4 to
* match the spec. Can't use DOT3 as it won't propagate values
* into alpha as required:
*
* Note - the global factor is set up with alpha == .5, so
* the alpha part of the DOT4 calculation should be zero.
*/
if (combine->ModeRGB == GL_DOT3_RGBA_EXT ||
combine->ModeRGB == GL_DOT3_RGBA) {
ablendop = TEXBLENDOP_DOT4;
numAlphaArgs = 2;
args_A[0] = TEXBLENDARG_FACTOR; /* the global factor */
args_A[1] = TEXBLENDARG_FACTOR;
args_A[2] = TEXBLENDARG_FACTOR;
}
else {
switch (combine->ModeA) {
case GL_REPLACE:
ablendop = TEXBLENDOP_ARG1;
break;
case GL_MODULATE:
ablendop = TEXBLENDOP_MODULATE;
break;
case GL_ADD:
ablendop = TEXBLENDOP_ADD;
break;
case GL_ADD_SIGNED:
ablendop = TEXBLENDOP_ADDSIGNED;
break;
case GL_INTERPOLATE:
ablendop = TEXBLENDOP_BLEND;
break;
case GL_SUBTRACT:
ablendop = TEXBLENDOP_SUBTRACT;
break;
default:
return pass_through(state, blendUnit);
}
ablendop |= (alpha_shift << TEXOP_SCALE_SHIFT);
/* Handle A args */
for (i = 0; i < 3; i++) {
switch (combine->SourceA[i]) {
case GL_TEXTURE:
args_A[i] = texel_op;
break;
case GL_TEXTURE0:
case GL_TEXTURE1:
case GL_TEXTURE2:
case GL_TEXTURE3:
args_A[i] = GetTexelOp(combine->SourceA[i] - GL_TEXTURE0);
break;
case GL_CONSTANT:
args_A[i] = TEXBLENDARG_FACTOR_N;
need_factor = 1;
break;
case GL_PRIMARY_COLOR:
args_A[i] = TEXBLENDARG_DIFFUSE;
break;
case GL_PREVIOUS:
args_A[i] = TEXBLENDARG_CURRENT;
break;
default:
return pass_through(state, blendUnit);
}
switch (combine->OperandA[i]) {
case GL_SRC_ALPHA:
args_A[i] |= 0;
break;
case GL_ONE_MINUS_SRC_ALPHA:
args_A[i] |= TEXBLENDARG_INV_ARG;
break;
default:
return pass_through(state, blendUnit);
}
}
}
/* Native Arg1 == Arg0 in GL_EXT_texture_env_combine spec */
/* Native Arg2 == Arg1 in GL_EXT_texture_env_combine spec */
/* Native Arg0 == Arg2 in GL_EXT_texture_env_combine spec */
/* When we render we need to figure out which is the last really enabled
* tex unit, and put last stage on it
*/
/* Build color & alpha pipelines */
used = 0;
state[used++] = (_3DSTATE_MAP_BLEND_OP_CMD(blendUnit) |
TEXPIPE_COLOR |
ENABLE_TEXOUTPUT_WRT_SEL |
TEXOP_OUTPUT_CURRENT |
DISABLE_TEX_CNTRL_STAGE | TEXOP_MODIFY_PARMS | blendop);
state[used++] = (_3DSTATE_MAP_BLEND_OP_CMD(blendUnit) |
TEXPIPE_ALPHA |
ENABLE_TEXOUTPUT_WRT_SEL |
TEXOP_OUTPUT_CURRENT | TEXOP_MODIFY_PARMS | ablendop);
for (i = 0; i < numColorArgs; i++) {
state[used++] = (_3DSTATE_MAP_BLEND_ARG_CMD(blendUnit) |
tex_blend_rgb[i] | args_RGB[i]);
}
for (i = 0; i < numAlphaArgs; i++) {
state[used++] = (_3DSTATE_MAP_BLEND_ARG_CMD(blendUnit) |
tex_blend_a[i] | args_A[i]);
}
if (need_factor)
return emit_factor(blendUnit, state, used, factor);
else
return used;
}
static void
emit_texblend(struct i830_context *i830, GLuint unit, GLuint blendUnit,
bool last_stage)
{
struct gl_fixedfunc_texture_unit *texUnit =
&i830->intel.ctx.Texture.FixedFuncUnit[unit];
GLuint tmp[I830_TEXBLEND_SIZE], tmp_sz;
if (0)
fprintf(stderr, "%s unit %d\n", __func__, unit);
/* Update i830->state.TexBlend
*/
tmp_sz = i830SetTexEnvCombine(i830, texUnit->_CurrentCombine, blendUnit,
GetTexelOp(unit), tmp, texUnit->EnvColor);
if (last_stage)
tmp[0] |= TEXOP_LAST_STAGE;
if (tmp_sz != i830->state.TexBlendWordsUsed[blendUnit] ||
memcmp(tmp, i830->state.TexBlend[blendUnit],
tmp_sz * sizeof(GLuint))) {
I830_STATECHANGE(i830, I830_UPLOAD_TEXBLEND(blendUnit));
memcpy(i830->state.TexBlend[blendUnit], tmp, tmp_sz * sizeof(GLuint));
i830->state.TexBlendWordsUsed[blendUnit] = tmp_sz;
}
I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND(blendUnit), true);
}
static void
emit_passthrough(struct i830_context *i830)
{
GLuint tmp[I830_TEXBLEND_SIZE], tmp_sz;
GLuint unit = 0;
tmp_sz = pass_through(tmp, unit);
tmp[0] |= TEXOP_LAST_STAGE;
if (tmp_sz != i830->state.TexBlendWordsUsed[unit] ||
memcmp(tmp, i830->state.TexBlend[unit], tmp_sz * sizeof(GLuint))) {
I830_STATECHANGE(i830, I830_UPLOAD_TEXBLEND(unit));
memcpy(i830->state.TexBlend[unit], tmp, tmp_sz * sizeof(GLuint));
i830->state.TexBlendWordsUsed[unit] = tmp_sz;
}
I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND(unit), true);
}
void
i830EmitTextureBlend(struct i830_context *i830)
{
struct gl_context *ctx = &i830->intel.ctx;
GLuint unit, blendunit = 0;
I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND_ALL, false);
if (ctx->Texture._MaxEnabledTexImageUnit != -1) {
for (unit = 0; unit <= ctx->Texture._MaxEnabledTexImageUnit; unit++)
if (ctx->Texture.Unit[unit]._Current)
emit_texblend(i830, unit, blendunit++,
unit == ctx->Texture._MaxEnabledTexImageUnit);
} else {
emit_passthrough(i830);
}
}

src/mesa/drivers/dri/i915/i830_texstate.c

@@ -1,365 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/mtypes.h"
#include "main/enums.h"
#include "main/colormac.h"
#include "main/macros.h"
#include "main/samplerobj.h"
#include "intel_mipmap_tree.h"
#include "intel_tex.h"
#include "i830_context.h"
#include "i830_reg.h"
#include "intel_chipset.h"
static GLuint
translate_texture_format(GLuint mesa_format)
{
switch (mesa_format) {
case MESA_FORMAT_L_UNORM8:
return MAPSURF_8BIT | MT_8BIT_L8;
case MESA_FORMAT_I_UNORM8:
return MAPSURF_8BIT | MT_8BIT_I8;
case MESA_FORMAT_LA_UNORM8:
return MAPSURF_16BIT | MT_16BIT_AY88;
case MESA_FORMAT_B5G6R5_UNORM:
return MAPSURF_16BIT | MT_16BIT_RGB565;
case MESA_FORMAT_B5G5R5A1_UNORM:
return MAPSURF_16BIT | MT_16BIT_ARGB1555;
case MESA_FORMAT_B4G4R4A4_UNORM:
return MAPSURF_16BIT | MT_16BIT_ARGB4444;
case MESA_FORMAT_B8G8R8A8_UNORM:
return MAPSURF_32BIT | MT_32BIT_ARGB8888;
case MESA_FORMAT_B8G8R8X8_UNORM:
return MAPSURF_32BIT | MT_32BIT_XRGB8888;
case MESA_FORMAT_YCBCR_REV:
return (MAPSURF_422 | MT_422_YCRCB_NORMAL);
case MESA_FORMAT_YCBCR:
return (MAPSURF_422 | MT_422_YCRCB_SWAPY);
case MESA_FORMAT_RGB_FXT1:
case MESA_FORMAT_RGBA_FXT1:
return (MAPSURF_COMPRESSED | MT_COMPRESS_FXT1);
case MESA_FORMAT_RGBA_DXT1:
case MESA_FORMAT_RGB_DXT1:
return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT1);
case MESA_FORMAT_RGBA_DXT3:
return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT2_3);
case MESA_FORMAT_RGBA_DXT5:
return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT4_5);
default:
fprintf(stderr, "%s: bad image format %s\n", __func__,
_mesa_get_format_name(mesa_format));
abort();
return 0;
}
}
/* The i915 (and related graphics cores) do not support GL_CLAMP. The
* Intel drivers for "other operating systems" implement GL_CLAMP as
* GL_CLAMP_TO_EDGE, so the same is done here.
*/
static GLuint
translate_wrap_mode(GLenum wrap)
{
switch (wrap) {
case GL_REPEAT:
return TEXCOORDMODE_WRAP;
case GL_CLAMP:
case GL_CLAMP_TO_EDGE:
return TEXCOORDMODE_CLAMP; /* not really correct */
case GL_CLAMP_TO_BORDER:
return TEXCOORDMODE_CLAMP_BORDER;
case GL_MIRRORED_REPEAT:
return TEXCOORDMODE_MIRROR;
default:
return TEXCOORDMODE_WRAP;
}
}
/* Recalculate all state from scratch. Perhaps not the most
* efficient, but this has gotten complex enough that we need
* something which is understandable and reliable.
*/
static bool
i830_update_tex_unit(struct intel_context *intel, GLuint unit, GLuint ss3)
{
struct gl_context *ctx = &intel->ctx;
struct i830_context *i830 = i830_context(ctx);
struct gl_texture_unit *tUnit = &ctx->Texture.Unit[unit];
struct gl_texture_object *tObj = tUnit->_Current;
struct intel_texture_object *intelObj = intel_texture_object(tObj);
struct gl_texture_image *firstImage;
struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
GLuint *state = i830->state.Tex[unit], format, pitch;
GLint lodbias;
GLubyte border[4];
GLuint dst_x, dst_y;
memset(state, 0, sizeof(*state));
/*We need to refcount these. */
if (i830->state.tex_buffer[unit] != NULL) {
drm_intel_bo_unreference(i830->state.tex_buffer[unit]);
i830->state.tex_buffer[unit] = NULL;
}
if (!intel_finalize_mipmap_tree(intel, unit))
return false;
/* Get first image here, since intelObj->firstLevel will get set in
* the intel_finalize_mipmap_tree() call above.
*/
firstImage = tObj->Image[0][tObj->Attrib.BaseLevel];
intel_miptree_get_image_offset(intelObj->mt, tObj->Attrib.BaseLevel, 0,
&dst_x, &dst_y);
drm_intel_bo_reference(intelObj->mt->region->bo);
i830->state.tex_buffer[unit] = intelObj->mt->region->bo;
pitch = intelObj->mt->region->pitch;
/* XXX: This calculation is probably broken for tiled images with
* a non-page-aligned offset.
*/
i830->state.tex_offset[unit] = dst_x * intelObj->mt->cpp + dst_y * pitch;
format = translate_texture_format(firstImage->TexFormat);
state[I830_TEXREG_TM0LI] = (_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
(LOAD_TEXTURE_MAP0 << unit) | 4);
state[I830_TEXREG_TM0S1] =
(((firstImage->Height - 1) << TM0S1_HEIGHT_SHIFT) |
((firstImage->Width - 1) << TM0S1_WIDTH_SHIFT) | format);
if (intelObj->mt->region->tiling != I915_TILING_NONE) {
state[I830_TEXREG_TM0S1] |= TM0S1_TILED_SURFACE;
if (intelObj->mt->region->tiling == I915_TILING_Y)
state[I830_TEXREG_TM0S1] |= TM0S1_TILE_WALK;
}
state[I830_TEXREG_TM0S2] =
((((pitch / 4) - 1) << TM0S2_PITCH_SHIFT) | TM0S2_CUBE_FACE_ENA_MASK);
{
if (tObj->Target == GL_TEXTURE_CUBE_MAP)
state[I830_TEXREG_CUBE] = (_3DSTATE_MAP_CUBE | MAP_UNIT(unit) |
CUBE_NEGX_ENABLE |
CUBE_POSX_ENABLE |
CUBE_NEGY_ENABLE |
CUBE_POSY_ENABLE |
CUBE_NEGZ_ENABLE | CUBE_POSZ_ENABLE);
else
state[I830_TEXREG_CUBE] = (_3DSTATE_MAP_CUBE | MAP_UNIT(unit));
}
{
GLuint minFilt, mipFilt, magFilt;
float maxlod;
uint32_t minlod_fixed, maxlod_fixed;
switch (sampler->Attrib.MinFilter) {
case GL_NEAREST:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_NONE;
break;
case GL_LINEAR:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_NONE;
break;
case GL_NEAREST_MIPMAP_NEAREST:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_NEAREST;
break;
case GL_LINEAR_MIPMAP_NEAREST:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_NEAREST;
break;
case GL_NEAREST_MIPMAP_LINEAR:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_LINEAR;
break;
case GL_LINEAR_MIPMAP_LINEAR:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_LINEAR;
break;
default:
return false;
}
if (sampler->Attrib.MaxAnisotropy > 1.0) {
minFilt = FILTER_ANISOTROPIC;
magFilt = FILTER_ANISOTROPIC;
/* no trilinear + anisotropic */
mipFilt = MIPFILTER_NEAREST;
}
else {
switch (sampler->Attrib.MagFilter) {
case GL_NEAREST:
magFilt = FILTER_NEAREST;
break;
case GL_LINEAR:
magFilt = FILTER_LINEAR;
break;
default:
return false;
}
}
lodbias = (int) ((tUnit->LodBias + sampler->Attrib.LodBias) * 16.0);
if (lodbias < -64)
lodbias = -64;
if (lodbias > 63)
lodbias = 63;
state[I830_TEXREG_TM0S3] = ((lodbias << TM0S3_LOD_BIAS_SHIFT) &
TM0S3_LOD_BIAS_MASK);
#if 0
/* YUV conversion:
*/
if (firstImage->TexFormat->MesaFormat == MESA_FORMAT_YCBCR ||
firstImage->TexFormat->MesaFormat == MESA_FORMAT_YCBCR_REV)
state[I830_TEXREG_TM0S3] |= SS2_COLORSPACE_CONVERSION;
#endif
/* We get one field with fraction bits for the maximum
* addressable (smallest resolution) LOD. Use it to cover both
* MAX_LEVEL and MAX_LOD.
*/
minlod_fixed = U_FIXED(CLAMP(sampler->Attrib.MinLod, 0.0, 11), 4);
maxlod = MIN2(sampler->Attrib.MaxLod, tObj->_MaxLevel - tObj->Attrib.BaseLevel);
if (intel->intelScreen->deviceID == PCI_CHIP_I855_GM ||
intel->intelScreen->deviceID == PCI_CHIP_I865_G) {
maxlod_fixed = U_FIXED(CLAMP(maxlod, 0.0, 11.75), 2);
maxlod_fixed = MAX2(maxlod_fixed, (minlod_fixed + 3) >> 2);
state[I830_TEXREG_TM0S3] |= maxlod_fixed << TM0S3_MIN_MIP_SHIFT;
state[I830_TEXREG_TM0S2] |= TM0S2_LOD_PRECLAMP;
} else {
maxlod_fixed = U_FIXED(CLAMP(maxlod, 0.0, 11), 0);
maxlod_fixed = MAX2(maxlod_fixed, (minlod_fixed + 15) >> 4);
state[I830_TEXREG_TM0S3] |= maxlod_fixed << TM0S3_MIN_MIP_SHIFT_830;
}
state[I830_TEXREG_TM0S3] |= minlod_fixed << TM0S3_MAX_MIP_SHIFT;
state[I830_TEXREG_TM0S3] |= ((minFilt << TM0S3_MIN_FILTER_SHIFT) |
(mipFilt << TM0S3_MIP_FILTER_SHIFT) |
(magFilt << TM0S3_MAG_FILTER_SHIFT));
}
{
GLenum ws = sampler->Attrib.WrapS;
GLenum wt = sampler->Attrib.WrapT;
/* 3D textures not available on i830
*/
if (tObj->Target == GL_TEXTURE_3D)
return false;
state[I830_TEXREG_MCS] = (_3DSTATE_MAP_COORD_SET_CMD |
MAP_UNIT(unit) |
ENABLE_TEXCOORD_PARAMS |
ss3 |
ENABLE_ADDR_V_CNTL |
TEXCOORD_ADDR_V_MODE(translate_wrap_mode(wt))
| ENABLE_ADDR_U_CNTL |
TEXCOORD_ADDR_U_MODE(translate_wrap_mode
(ws)));
}
/* convert border color from float to ubyte */
CLAMPED_FLOAT_TO_UBYTE(border[0], sampler->Attrib.state.border_color.f[0]);
CLAMPED_FLOAT_TO_UBYTE(border[1], sampler->Attrib.state.border_color.f[1]);
CLAMPED_FLOAT_TO_UBYTE(border[2], sampler->Attrib.state.border_color.f[2]);
CLAMPED_FLOAT_TO_UBYTE(border[3], sampler->Attrib.state.border_color.f[3]);
state[I830_TEXREG_TM0S4] = PACK_COLOR_8888(border[3],
border[0],
border[1],
border[2]);
I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(unit), true);
/* memcmp was already disabled, but definitely won't work as the
* region might now change and that wouldn't be detected:
*/
I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
return true;
}
void
i830UpdateTextureState(struct intel_context *intel)
{
struct i830_context *i830 = i830_context(&intel->ctx);
bool ok = true;
GLuint i;
for (i = 0; i < I830_TEX_UNITS && ok; i++) {
if (intel->ctx.Texture.Unit[i]._Current) {
switch (intel->ctx.Texture.Unit[i]._Current->Target) {
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
case GL_TEXTURE_CUBE_MAP:
ok = i830_update_tex_unit(intel, i, TEXCOORDS_ARE_NORMAL);
break;
case GL_TEXTURE_RECTANGLE:
ok = i830_update_tex_unit(intel, i, TEXCOORDS_ARE_IN_TEXELUNITS);
break;
case GL_TEXTURE_3D:
default:
ok = false;
break;
}
} else {
struct i830_context *i830 = i830_context(&intel->ctx);
if (i830->state.active & I830_UPLOAD_TEX(i))
I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(i), false);
if (i830->state.tex_buffer[i] != NULL) {
drm_intel_bo_unreference(i830->state.tex_buffer[i]);
i830->state.tex_buffer[i] = NULL;
}
}
}
FALLBACK(intel, I830_FALLBACK_TEXTURE, !ok);
if (ok)
i830EmitTextureBlend(i830);
}
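
One subtle bit above is the LOD clamping in i830_update_tex_unit(): MinLod gets 4 fraction bits, while the max-LOD field has 2 fraction bits on 855GM/865G and none elsewhere, at different shifts. Below is a worked example of the non-855GM/865G path, assuming Mesa's U_FIXED(value, frac_bits) is (GLuint)((value) * (1 << frac_bits)) and CLAMP/MAX2 are the usual macros.

```c
/* Worked example (illustration only) of the LOD fixed-point packing. */
float min_lod = 2.5f, max_lod = 7.0f;

GLuint minlod_fixed = U_FIXED(CLAMP(min_lod, 0.0, 11), 4);  /* 2.5 * 16 = 40 */
GLuint maxlod_fixed = U_FIXED(CLAMP(max_lod, 0.0, 11), 0);  /* 7 */

/* The max field may not address a larger LOD than the min, so round the
 * 4.4 min up to whole levels first: max(7, ceil(40/16)) = max(7, 3) = 7. */
maxlod_fixed = MAX2(maxlod_fixed, (minlod_fixed + 15) >> 4);

/* Min lands in the MAX_MIP field, max in the 830 MIN_MIP field ("min
 * mip" being the smallest-resolution, i.e. highest, LOD). */
GLuint tm0s3 = (minlod_fixed << TM0S3_MAX_MIP_SHIFT) |
               (maxlod_fixed << TM0S3_MIN_MIP_SHIFT_830);
```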

src/mesa/drivers/dri/i915/i830_vtbl.c

@@ -1,894 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "i830_context.h"
#include "i830_reg.h"
#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tris.h"
#include "intel_fbo.h"
#include "intel_buffers.h"
#include "tnl/tnl.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "swrast_setup/swrast_setup.h"
#include "main/renderbuffer.h"
#include "main/framebuffer.h"
#include "main/fbobject.h"
#define FILE_DEBUG_FLAG DEBUG_STATE
static bool i830_check_vertex_size(struct intel_context *intel,
GLuint expected);
#define SZ_TO_HW(sz) ((sz-2)&0x3)
#define EMIT_SZ(sz) (EMIT_1F + (sz) - 1)
#define EMIT_ATTR( ATTR, STYLE, V0 ) \
do { \
intel->vertex_attrs[intel->vertex_attr_count].attrib = (ATTR); \
intel->vertex_attrs[intel->vertex_attr_count].format = (STYLE); \
intel->vertex_attr_count++; \
v0 |= V0; \
} while (0)
#define EMIT_PAD( N ) \
do { \
intel->vertex_attrs[intel->vertex_attr_count].attrib = 0; \
intel->vertex_attrs[intel->vertex_attr_count].format = EMIT_PAD; \
intel->vertex_attrs[intel->vertex_attr_count].offset = (N); \
intel->vertex_attr_count++; \
} while (0)
#define VRTX_TEX_SET_FMT(n, x) ((x)<<((n)*2))
#define TEXBIND_SET(n, x) ((x)<<((n)*4))
static void
i830_render_prevalidate(struct intel_context *intel)
{
}
static void
i830_render_start(struct intel_context *intel)
{
struct gl_context *ctx = &intel->ctx;
struct i830_context *i830 = i830_context(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
GLbitfield64 index_bitset = tnl->render_inputs_bitset;
GLuint v0 = _3DSTATE_VFT0_CMD;
GLuint v2 = _3DSTATE_VFT1_CMD;
GLuint mcsb1 = 0;
/* Important:
*/
VB->AttribPtr[VERT_ATTRIB_POS] = VB->NdcPtr;
intel->vertex_attr_count = 0;
/* EMIT_ATTR's must be in order as they tell t_vertex.c how to
* build up a hardware vertex.
*/
if (index_bitset & BITFIELD64_RANGE(_TNL_ATTRIB_TEX0, _TNL_NUM_TEX)) {
EMIT_ATTR(_TNL_ATTRIB_POS, EMIT_4F_VIEWPORT, VFT0_XYZW);
intel->coloroffset = 4;
}
else {
EMIT_ATTR(_TNL_ATTRIB_POS, EMIT_3F_VIEWPORT, VFT0_XYZ);
intel->coloroffset = 3;
}
if (index_bitset & BITFIELD64_BIT(_TNL_ATTRIB_POINTSIZE)) {
EMIT_ATTR(_TNL_ATTRIB_POINTSIZE, EMIT_1F, VFT0_POINT_WIDTH);
}
EMIT_ATTR(_TNL_ATTRIB_COLOR0, EMIT_4UB_4F_BGRA, VFT0_DIFFUSE);
intel->specoffset = 0;
if (index_bitset & (BITFIELD64_BIT(_TNL_ATTRIB_COLOR1) |
BITFIELD64_BIT(_TNL_ATTRIB_FOG))) {
if (index_bitset & BITFIELD64_BIT(_TNL_ATTRIB_COLOR1)) {
intel->specoffset = intel->coloroffset + 1;
EMIT_ATTR(_TNL_ATTRIB_COLOR1, EMIT_3UB_3F_BGR, VFT0_SPEC);
}
else
EMIT_PAD(3);
if (index_bitset & BITFIELD64_BIT(_TNL_ATTRIB_FOG))
EMIT_ATTR(_TNL_ATTRIB_FOG, EMIT_1UB_1F, VFT0_SPEC);
else
EMIT_PAD(1);
}
if (index_bitset & BITFIELD64_RANGE(_TNL_ATTRIB_TEX0, _TNL_NUM_TEX)) {
int i, count = 0;
for (i = 0; i < I830_TEX_UNITS; i++) {
if (index_bitset & BITFIELD64_BIT(_TNL_ATTRIB_TEX(i))) {
GLuint sz = VB->AttribPtr[_TNL_ATTRIB_TEX0 + i]->size;
GLuint emit;
GLuint mcs = (i830->state.Tex[i][I830_TEXREG_MCS] &
~TEXCOORDTYPE_MASK);
if (intel->ctx.Texture.Unit[i]._Current->Target == GL_TEXTURE_CUBE_MAP) {
emit = EMIT_3F;
sz = 3;
mcs |= TEXCOORDTYPE_VECTOR;
} else {
switch (sz) {
case 1:
case 2:
case 3:
emit = EMIT_2F;
sz = 2;
mcs |= TEXCOORDTYPE_CARTESIAN;
break;
case 4:
emit = EMIT_3F_XYW;
sz = 3;
mcs |= TEXCOORDTYPE_HOMOGENEOUS;
break;
default:
continue;
}
}
EMIT_ATTR(_TNL_ATTRIB_TEX0 + i, emit, 0);
v2 |= VRTX_TEX_SET_FMT(count, SZ_TO_HW(sz));
mcsb1 |= (count + 8) << (i * 4);
if (mcs != i830->state.Tex[i][I830_TEXREG_MCS]) {
I830_STATECHANGE(i830, I830_UPLOAD_TEX(i));
i830->state.Tex[i][I830_TEXREG_MCS] = mcs;
}
count++;
}
}
v0 |= VFT0_TEX_COUNT(count);
}
/* Only need to change the vertex emit code if there has been a
* statechange to a new hardware vertex format:
*/
if (v0 != i830->state.Ctx[I830_CTXREG_VF] ||
v2 != i830->state.Ctx[I830_CTXREG_VF2] ||
mcsb1 != i830->state.Ctx[I830_CTXREG_MCSB1] ||
index_bitset != i830->last_index_bitset) {
I830_STATECHANGE(i830, I830_UPLOAD_CTX);
/* Must do this *after* statechange, so as not to affect
* buffered vertices reliant on the old state:
*/
intel->vertex_size =
_tnl_install_attrs(ctx,
intel->vertex_attrs,
intel->vertex_attr_count,
intel->ViewportMatrix.m, 0);
intel->vertex_size >>= 2;
i830->state.Ctx[I830_CTXREG_VF] = v0;
i830->state.Ctx[I830_CTXREG_VF2] = v2;
i830->state.Ctx[I830_CTXREG_MCSB1] = mcsb1;
i830->last_index_bitset = index_bitset;
assert(i830_check_vertex_size(intel, intel->vertex_size));
}
}
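For orientation, the EMIT_ATTR sequence above is what defines the packed hardware vertex. A hypothetical C view (names invented, not part of the driver) of the layout produced when exactly one 2D texture coordinate is enabled:

/* Illustrative only: the layout i830_render_start() describes via
 * EMIT_ATTR when one 2D texcoord is active; t_vertex.c emits it. */
struct i830_example_vertex {
   float x, y, z, w;         /* EMIT_4F_VIEWPORT -> VFT0_XYZW      */
   unsigned char b, g, r, a; /* EMIT_4UB_4F_BGRA -> VFT0_DIFFUSE   */
   float s0, t0;             /* EMIT_2F          -> TEXCOORDFMT_2D */
};
/* sizeof == 28 bytes, i.e. intel->vertex_size == 7 dwords -- the same
 * number i830_check_vertex_size() recomputes from VFT0/VFT1 below. */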
static void
i830_reduced_primitive_state(struct intel_context *intel, GLenum rprim)
{
struct i830_context *i830 = i830_context(&intel->ctx);
GLuint st1 = i830->state.Stipple[I830_STPREG_ST1];
st1 &= ~ST1_ENABLE;
switch (rprim) {
case GL_TRIANGLES:
if (intel->ctx.Polygon.StippleFlag && intel->hw_stipple)
st1 |= ST1_ENABLE;
break;
case GL_LINES:
case GL_POINTS:
default:
break;
}
i830->intel.reduced_primitive = rprim;
if (st1 != i830->state.Stipple[I830_STPREG_ST1]) {
INTEL_FIREVERTICES(intel);
I830_STATECHANGE(i830, I830_UPLOAD_STIPPLE);
i830->state.Stipple[I830_STPREG_ST1] = st1;
}
}
/* Pull apart the vertex format registers and figure out how large a
* vertex is supposed to be.
*/
static bool
i830_check_vertex_size(struct intel_context *intel, GLuint expected)
{
struct i830_context *i830 = i830_context(&intel->ctx);
int vft0 = i830->state.Ctx[I830_CTXREG_VF];
int vft1 = i830->state.Ctx[I830_CTXREG_VF2];
int nrtex = (vft0 & VFT0_TEX_COUNT_MASK) >> VFT0_TEX_COUNT_SHIFT;
int i, sz = 0;
switch (vft0 & VFT0_XYZW_MASK) {
case VFT0_XY:
sz = 2;
break;
case VFT0_XYZ:
sz = 3;
break;
case VFT0_XYW:
sz = 3;
break;
case VFT0_XYZW:
sz = 4;
break;
default:
fprintf(stderr, "no xyzw specified\n");
return 0;
}
if (vft0 & VFT0_SPEC)
sz++;
if (vft0 & VFT0_DIFFUSE)
sz++;
if (vft0 & VFT0_DEPTH_OFFSET)
sz++;
if (vft0 & VFT0_POINT_WIDTH)
sz++;
for (i = 0; i < nrtex; i++) {
switch (vft1 & VFT1_TEX0_MASK) {
case TEXCOORDFMT_2D:
sz += 2;
break;
case TEXCOORDFMT_3D:
sz += 3;
break;
case TEXCOORDFMT_4D:
sz += 4;
break;
case TEXCOORDFMT_1D:
sz += 1;
break;
}
vft1 >>= VFT1_TEX1_SHIFT;
}
if (sz != expected)
fprintf(stderr, "vertex size mismatch %d/%d\n", sz, expected);
return sz == expected;
}
static void
i830_emit_invarient_state(struct intel_context *intel)
{
BATCH_LOCALS;
BEGIN_BATCH(29);
OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
OUT_BATCH(0);
OUT_BATCH(_3DSTATE_DFLT_SPEC_CMD);
OUT_BATCH(0);
OUT_BATCH(_3DSTATE_DFLT_Z_CMD);
OUT_BATCH(0);
OUT_BATCH(_3DSTATE_FOG_MODE_CMD);
OUT_BATCH(FOGFUNC_ENABLE |
FOG_LINEAR_CONST | FOGSRC_INDEX_Z | ENABLE_FOG_DENSITY);
OUT_BATCH(0);
OUT_BATCH(0);
OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
MAP_UNIT(0) |
DISABLE_TEX_STREAM_BUMP |
ENABLE_TEX_STREAM_COORD_SET |
TEX_STREAM_COORD_SET(0) |
ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(0));
OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
MAP_UNIT(1) |
DISABLE_TEX_STREAM_BUMP |
ENABLE_TEX_STREAM_COORD_SET |
TEX_STREAM_COORD_SET(1) |
ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(1));
OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
MAP_UNIT(2) |
DISABLE_TEX_STREAM_BUMP |
ENABLE_TEX_STREAM_COORD_SET |
TEX_STREAM_COORD_SET(2) |
ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(2));
OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
MAP_UNIT(3) |
DISABLE_TEX_STREAM_BUMP |
ENABLE_TEX_STREAM_COORD_SET |
TEX_STREAM_COORD_SET(3) |
ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(3));
OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(0));
OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(1));
OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(2));
OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(3));
OUT_BATCH(_3DSTATE_VERTEX_TRANSFORM);
OUT_BATCH(DISABLE_VIEWPORT_TRANSFORM | DISABLE_PERSPECTIVE_DIVIDE);
OUT_BATCH(_3DSTATE_W_STATE_CMD);
OUT_BATCH(MAGIC_W_STATE_DWORD1);
OUT_BATCH(0x3f800000 /* 1.0 in IEEE float */ );
OUT_BATCH(_3DSTATE_COLOR_FACTOR_CMD);
OUT_BATCH(0x80808080); /* .5 required in alpha for GL_DOT3_RGBA_EXT */
ADVANCE_BATCH();
}
#define emit( intel, state, size ) \
intel_batchbuffer_data(intel, state, size)
static GLuint
get_dirty(struct i830_hw_state *state)
{
return state->active & ~state->emitted;
}
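The rest of this file revolves around that one-liner: 'active' says which packets the current state needs, 'emitted' which ones are already in the batch. A self-contained sketch of the protocol (packet names invented):

#include <assert.h>

#define UPLOAD_CTX  0x1
#define UPLOAD_TEX0 0x2

struct hw_state { unsigned active, emitted; };

/* Same expression as get_dirty() above. */
static unsigned dirty(const struct hw_state *s) { return s->active & ~s->emitted; }

int main(void)
{
   struct hw_state s = { UPLOAD_CTX | UPLOAD_TEX0, 0 };
   assert(dirty(&s) == (UPLOAD_CTX | UPLOAD_TEX0)); /* both need emitting */
   s.emitted |= dirty(&s);     /* emit_state marks everything emitted    */
   assert(dirty(&s) == 0);
   s.emitted &= ~UPLOAD_CTX;   /* a STATECHANGE forces CTX to be resent  */
   assert(dirty(&s) == UPLOAD_CTX);
   return 0;
}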
static GLuint
get_state_size(struct i830_hw_state *state)
{
GLuint dirty = get_dirty(state);
GLuint sz = 0;
GLuint i;
if (dirty & I830_UPLOAD_INVARIENT)
sz += 40 * sizeof(int);
if (dirty & I830_UPLOAD_RASTER_RULES)
sz += sizeof(state->RasterRules);
if (dirty & I830_UPLOAD_CTX)
sz += sizeof(state->Ctx);
if (dirty & I830_UPLOAD_BUFFERS)
sz += sizeof(state->Buffer);
if (dirty & I830_UPLOAD_STIPPLE)
sz += sizeof(state->Stipple);
for (i = 0; i < I830_TEX_UNITS; i++) {
if ((dirty & I830_UPLOAD_TEX(i)))
sz += sizeof(state->Tex[i]);
if (dirty & I830_UPLOAD_TEXBLEND(i))
sz += state->TexBlendWordsUsed[i] * 4;
}
return sz;
}
/* Push the state into the sarea and/or texture memory.
*/
static void
i830_emit_state(struct intel_context *intel)
{
struct i830_context *i830 = i830_context(&intel->ctx);
struct i830_hw_state *state = &i830->state;
int i, count;
GLuint dirty;
drm_intel_bo *aper_array[3 + I830_TEX_UNITS];
int aper_count;
GET_CURRENT_CONTEXT(ctx);
BATCH_LOCALS;
/* We don't hold the lock at this point, so want to make sure that
* there won't be a buffer wrap between the state emits and the primitive
* emit header.
*
* It might be better to talk about explicit places where
* scheduling is allowed, rather than assume that it is whenever a
* batchbuffer fills up.
*/
intel_batchbuffer_require_space(intel,
get_state_size(state) +
INTEL_PRIM_EMIT_SIZE);
count = 0;
again:
aper_count = 0;
dirty = get_dirty(state);
aper_array[aper_count++] = intel->batch.bo;
if (dirty & I830_UPLOAD_BUFFERS) {
aper_array[aper_count++] = state->draw_region->bo;
if (state->depth_region)
aper_array[aper_count++] = state->depth_region->bo;
}
for (i = 0; i < I830_TEX_UNITS; i++)
if (dirty & I830_UPLOAD_TEX(i)) {
if (state->tex_buffer[i]) {
aper_array[aper_count++] = state->tex_buffer[i];
}
}
if (dri_bufmgr_check_aperture_space(aper_array, aper_count)) {
if (count == 0) {
count++;
intel_batchbuffer_flush(intel);
goto again;
} else {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "i830 emit state");
assert(0);
}
}
/* Do this here as we may have flushed the batchbuffer above,
* causing more state to be dirty!
*/
dirty = get_dirty(state);
state->emitted |= dirty;
assert(get_dirty(state) == 0);
if (dirty & I830_UPLOAD_INVARIENT) {
DBG("I830_UPLOAD_INVARIENT:\n");
i830_emit_invarient_state(intel);
}
if (dirty & I830_UPLOAD_RASTER_RULES) {
DBG("I830_UPLOAD_RASTER_RULES:\n");
emit(intel, state->RasterRules, sizeof(state->RasterRules));
}
if (dirty & I830_UPLOAD_CTX) {
DBG("I830_UPLOAD_CTX:\n");
emit(intel, state->Ctx, sizeof(state->Ctx));
}
if (dirty & I830_UPLOAD_BUFFERS) {
GLuint count = 15;
DBG("I830_UPLOAD_BUFFERS:\n");
if (state->depth_region)
count += 3;
BEGIN_BATCH(count);
OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR0]);
OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR1]);
OUT_RELOC(state->draw_region->bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
if (state->depth_region) {
OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR0]);
OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR1]);
OUT_RELOC(state->depth_region->bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
}
OUT_BATCH(state->Buffer[I830_DESTREG_DV0]);
OUT_BATCH(state->Buffer[I830_DESTREG_DV1]);
OUT_BATCH(state->Buffer[I830_DESTREG_SR0]);
OUT_BATCH(state->Buffer[I830_DESTREG_SR1]);
OUT_BATCH(state->Buffer[I830_DESTREG_SR2]);
OUT_BATCH(state->Buffer[I830_DESTREG_SENABLE]);
assert(state->Buffer[I830_DESTREG_DRAWRECT0] != MI_NOOP);
OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT0]);
OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT1]);
OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT2]);
OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT3]);
OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT4]);
OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT5]);
ADVANCE_BATCH();
}
if (dirty & I830_UPLOAD_STIPPLE) {
DBG("I830_UPLOAD_STIPPLE:\n");
emit(intel, state->Stipple, sizeof(state->Stipple));
}
for (i = 0; i < I830_TEX_UNITS; i++) {
if ((dirty & I830_UPLOAD_TEX(i))) {
DBG("I830_UPLOAD_TEX(%d):\n", i);
BEGIN_BATCH(I830_TEX_SETUP_SIZE + 1);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0LI]);
OUT_RELOC(state->tex_buffer[i],
I915_GEM_DOMAIN_SAMPLER, 0,
state->tex_offset[i]);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S1]);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S2]);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S3]);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S4]);
OUT_BATCH(state->Tex[i][I830_TEXREG_MCS]);
OUT_BATCH(state->Tex[i][I830_TEXREG_CUBE]);
ADVANCE_BATCH();
}
if (dirty & I830_UPLOAD_TEXBLEND(i)) {
DBG("I830_UPLOAD_TEXBLEND(%d): %d words\n", i,
state->TexBlendWordsUsed[i]);
emit(intel, state->TexBlend[i], state->TexBlendWordsUsed[i] * 4);
}
}
assert(get_dirty(state) == 0);
}
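The count/goto-again dance at the top of i830_emit_state() is a check-flush-retry pattern: flush once to empty the batch and retry, and treat a second failure as fatal since a lone emit can never fit. A runnable miniature (the helpers are stand-ins, not libdrm calls):

#include <stdio.h>
#include <stdlib.h>

static int aperture_full = 1;                              /* batch "full"  */
static int check_aperture(void) { return aperture_full; } /* 1 == too big  */
static void flush_batch(void)   { aperture_full = 0; }    /* frees space   */

static void emit_state(void)
{
   int count = 0;
again:
   if (check_aperture()) {
      if (count == 0) {
         count++;
         flush_batch();  /* empty the batch, then re-evaluate dirty state */
         goto again;
      }
      /* even an empty batch cannot fit this emit: unrecoverable */
      fprintf(stderr, "i830 emit state\n");
      abort();
   }
   /* ... emit the dirty packets ... */
}

int main(void) { emit_state(); return 0; }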
static void
i830_destroy_context(struct intel_context *intel)
{
GLuint i;
struct i830_context *i830 = i830_context(&intel->ctx);
intel_region_release(&i830->state.draw_region);
intel_region_release(&i830->state.depth_region);
for (i = 0; i < I830_TEX_UNITS; i++) {
if (i830->state.tex_buffer[i] != NULL) {
drm_intel_bo_unreference(i830->state.tex_buffer[i]);
i830->state.tex_buffer[i] = NULL;
}
}
_tnl_free_vertices(&intel->ctx);
}
static uint32_t i830_render_target_format_for_mesa_format[MESA_FORMAT_COUNT] =
{
[MESA_FORMAT_B8G8R8A8_UNORM] = DV_PF_8888,
[MESA_FORMAT_B8G8R8X8_UNORM] = DV_PF_8888,
[MESA_FORMAT_B5G6R5_UNORM] = DV_PF_565,
[MESA_FORMAT_B5G5R5A1_UNORM] = DV_PF_1555,
[MESA_FORMAT_B4G4R4A4_UNORM] = DV_PF_4444,
};
static bool
i830_render_target_supported(struct intel_context *intel,
struct gl_renderbuffer *rb)
{
mesa_format format = rb->Format;
if (format == MESA_FORMAT_Z24_UNORM_S8_UINT ||
format == MESA_FORMAT_Z24_UNORM_X8_UINT ||
format == MESA_FORMAT_Z_UNORM16) {
return true;
}
return i830_render_target_format_for_mesa_format[format] != 0;
}
static void
i830_set_draw_region(struct intel_context *intel,
struct intel_region *color_regions[],
struct intel_region *depth_region,
GLuint num_regions)
{
struct i830_context *i830 = i830_context(&intel->ctx);
struct gl_context *ctx = &intel->ctx;
struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[0];
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
struct gl_renderbuffer *drb;
struct intel_renderbuffer *idrb = NULL;
GLuint value;
struct i830_hw_state *state = &i830->state;
uint32_t draw_x, draw_y;
if (state->draw_region != color_regions[0]) {
intel_region_reference(&state->draw_region, color_regions[0]);
}
if (state->depth_region != depth_region) {
intel_region_reference(&state->depth_region, depth_region);
}
/*
* Set stride/cpp values
*/
i915_set_buf_info_for_region(&state->Buffer[I830_DESTREG_CBUFADDR0],
color_regions[0], BUF_3D_ID_COLOR_BACK);
i915_set_buf_info_for_region(&state->Buffer[I830_DESTREG_DBUFADDR0],
depth_region, BUF_3D_ID_DEPTH);
/*
* Compute/set I830_DESTREG_DV1 value
*/
value = (DSTORG_HORT_BIAS(0x8) | /* .5 */
DSTORG_VERT_BIAS(0x8) | DEPTH_IS_Z); /* .5 */
if (irb != NULL) {
value |= i830_render_target_format_for_mesa_format[intel_rb_format(irb)];
}
if (depth_region && depth_region->cpp == 4) {
value |= DEPTH_FRMT_24_FIXED_8_OTHER;
}
else {
value |= DEPTH_FRMT_16_FIXED;
}
state->Buffer[I830_DESTREG_DV1] = value;
drb = ctx->DrawBuffer->Attachment[BUFFER_DEPTH].Renderbuffer;
if (!drb)
drb = ctx->DrawBuffer->Attachment[BUFFER_STENCIL].Renderbuffer;
if (drb)
idrb = intel_renderbuffer(drb);
/* We set up the drawing rectangle to be offset into the color
* region's location in the miptree. If it doesn't match with
* depth's offsets, we can't render to it.
*
* (Well, not actually true -- the hw grew a bit to let depth's
* offset get forced to 0,0. We may want to use that if people are
* hitting that case. Also, some configurations may be supportable
* by tweaking the start offset of the buffers around, which we
* can't do in general due to tiling)
*/
FALLBACK(intel, I830_FALLBACK_DRAW_OFFSET,
idrb && irb && (idrb->draw_x != irb->draw_x ||
idrb->draw_y != irb->draw_y));
if (irb) {
draw_x = irb->draw_x;
draw_y = irb->draw_y;
} else if (idrb) {
draw_x = idrb->draw_x;
draw_y = idrb->draw_y;
} else {
draw_x = 0;
draw_y = 0;
}
state->Buffer[I830_DESTREG_DRAWRECT0] = _3DSTATE_DRAWRECT_INFO;
state->Buffer[I830_DESTREG_DRAWRECT1] = 0;
state->Buffer[I830_DESTREG_DRAWRECT2] = (draw_y << 16) | draw_x;
state->Buffer[I830_DESTREG_DRAWRECT3] =
((ctx->DrawBuffer->Width + draw_x - 1) & 0xffff) |
((ctx->DrawBuffer->Height + draw_y - 1) << 16);
state->Buffer[I830_DESTREG_DRAWRECT4] = (draw_y << 16) | draw_x;
state->Buffer[I830_DESTREG_DRAWRECT5] = MI_NOOP;
I830_STATECHANGE(i830, I830_UPLOAD_BUFFERS);
}
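For concreteness, a worked example of the drawing-rectangle packing above (all numbers invented):

/* A 640x480 drawbuffer whose color slice sits at draw_x = 0,
 * draw_y = 480 in the miptree gives:
 *
 *   DRAWRECT2 = (480 << 16) | 0           -- ymin:xmin
 *   DRAWRECT3 = ((640 + 0 - 1) & 0xffff)  -- xmax = 639
 *             | ((480 + 480 - 1) << 16)   -- ymax = 959
 *   DRAWRECT4 = (480 << 16) | 0           -- drawing origin
 */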
/**
* Update the hardware state for drawing into a window or framebuffer object.
*
* Called by glDrawBuffer, glBindFramebufferEXT, MakeCurrent, and other
* places within the driver.
*
* Basically, this needs to be called any time the current framebuffer
* changes, the renderbuffers change, or we need to draw into different
* color buffers.
*/
static void
i830_update_draw_buffer(struct intel_context *intel)
{
struct gl_context *ctx = &intel->ctx;
struct gl_framebuffer *fb = ctx->DrawBuffer;
struct intel_region *colorRegions[MAX_DRAW_BUFFERS], *depthRegion = NULL;
struct intel_renderbuffer *irbDepth = NULL, *irbStencil = NULL;
if (!fb) {
/* this can happen during the initial context initialization */
return;
}
irbDepth = intel_get_renderbuffer(fb, BUFFER_DEPTH);
irbStencil = intel_get_renderbuffer(fb, BUFFER_STENCIL);
/* Do this here, not core Mesa, since this function is called from
* many places within the driver.
*/
if (ctx->NewState & _NEW_BUFFERS) {
/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
_mesa_update_framebuffer(ctx, ctx->ReadBuffer, ctx->DrawBuffer);
/* this updates the DrawBuffer's Width/Height if it's a FBO */
_mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
}
if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
/* this may occur when we're called by glBindFrameBuffer() during
* the process of someone setting up renderbuffers, etc.
*/
/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
return;
}
/* How many color buffers are we drawing into?
*
* If there are zero buffers or the buffer is too big, don't configure any
* regions for hardware drawing. We'll fallback to software below. Not
* having regions set makes some of the software fallback paths faster.
*/
if ((fb->Width > ctx->Const.MaxRenderbufferSize)
|| (fb->Height > ctx->Const.MaxRenderbufferSize)
|| (fb->_NumColorDrawBuffers == 0)) {
/* writing to 0 */
colorRegions[0] = NULL;
}
else if (fb->_NumColorDrawBuffers > 1) {
int i;
struct intel_renderbuffer *irb;
for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);
colorRegions[i] = (irb && irb->mt) ? irb->mt->region : NULL;
}
}
else {
/* Get the intel_renderbuffer for the single colorbuffer we're drawing
* into.
*/
if (_mesa_is_winsys_fbo(fb)) {
/* drawing to window system buffer */
if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT)
colorRegions[0] = intel_get_rb_region(fb, BUFFER_FRONT_LEFT);
else
colorRegions[0] = intel_get_rb_region(fb, BUFFER_BACK_LEFT);
}
else {
/* drawing to user-created FBO */
struct intel_renderbuffer *irb;
irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
colorRegions[0] = (irb && irb->mt->region) ? irb->mt->region : NULL;
}
}
if (!colorRegions[0]) {
FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, true);
}
else {
FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, false);
}
/* Check for depth fallback. */
if (irbDepth && irbDepth->mt) {
FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, false);
depthRegion = irbDepth->mt->region;
} else if (irbDepth && !irbDepth->mt) {
FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, true);
depthRegion = NULL;
} else { /* !irbDepth */
/* No fallback is needed because there is no depth buffer. */
FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, false);
depthRegion = NULL;
}
/* Check for stencil fallback. */
if (irbStencil && irbStencil->mt) {
assert(intel_rb_format(irbStencil) == MESA_FORMAT_Z24_UNORM_S8_UINT);
FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, false);
} else if (irbStencil && !irbStencil->mt) {
FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, true);
} else { /* !irbStencil */
/* No fallback is needed because there is no stencil buffer. */
FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, false);
}
/* If we have a (packed) stencil buffer attached but no depth buffer,
* we still need to set up the shared depth/stencil state so we can use it.
*/
if (depthRegion == NULL && irbStencil && irbStencil->mt
&& intel_rb_format(irbStencil) == MESA_FORMAT_Z24_UNORM_S8_UINT) {
depthRegion = irbStencil->mt->region;
}
/*
* Update depth and stencil test state
*/
ctx->Driver.Enable(ctx, GL_DEPTH_TEST, ctx->Depth.Test);
ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
(ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
intel->vtbl.set_draw_region(intel, colorRegions, depthRegion,
fb->_NumColorDrawBuffers);
intel->NewGLState |= _NEW_BUFFERS;
/* Set state we know depends on drawable parameters:
*/
intelCalcViewport(ctx);
ctx->Driver.Scissor(ctx);
/* Update culling direction which changes depending on the
* orientation of the buffer:
*/
ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
}
/* This isn't really handled at the moment.
*/
static void
i830_new_batch(struct intel_context *intel)
{
struct i830_context *i830 = i830_context(&intel->ctx);
i830->state.emitted = 0;
}
static void
i830_assert_not_dirty( struct intel_context *intel )
{
struct i830_context *i830 = i830_context(&intel->ctx);
assert(!get_dirty(&i830->state));
(void) i830;
}
static void
i830_invalidate_state(struct intel_context *intel, GLuint new_state)
{
struct gl_context *ctx = &intel->ctx;
_swsetup_InvalidateState(ctx, new_state);
_tnl_InvalidateState(ctx, new_state);
_tnl_invalidate_vertex_state(ctx, new_state);
if (new_state & _NEW_LIGHT)
i830_update_provoking_vertex(&intel->ctx);
}
void
i830InitVtbl(struct i830_context *i830)
{
i830->intel.vtbl.check_vertex_size = i830_check_vertex_size;
i830->intel.vtbl.destroy = i830_destroy_context;
i830->intel.vtbl.emit_state = i830_emit_state;
i830->intel.vtbl.new_batch = i830_new_batch;
i830->intel.vtbl.reduced_primitive_state = i830_reduced_primitive_state;
i830->intel.vtbl.set_draw_region = i830_set_draw_region;
i830->intel.vtbl.update_draw_buffer = i830_update_draw_buffer;
i830->intel.vtbl.update_texture_state = i830UpdateTextureState;
i830->intel.vtbl.render_start = i830_render_start;
i830->intel.vtbl.render_prevalidate = i830_render_prevalidate;
i830->intel.vtbl.assert_not_dirty = i830_assert_not_dirty;
i830->intel.vtbl.finish_batch = intel_finish_vb;
i830->intel.vtbl.invalidate_state = i830_invalidate_state;
i830->intel.vtbl.render_target_supported = i830_render_target_supported;
}

View File

@@ -1,302 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "i915_context.h"
#include "main/api_exec.h"
#include "main/framebuffer.h"
#include "main/extensions.h"
#include "main/macros.h"
#include "main/state.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "intel_chipset.h"
#include "intel_tris.h"
#include "tnl/t_context.h"
#include "tnl/t_pipeline.h"
#include "tnl/t_vertex.h"
#include "util/u_memory.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "util/ralloc.h"
#include "i915_reg.h"
#include "i915_program.h"
/***************************************
* Mesa's Driver Functions
***************************************/
/* Override intel default.
*/
static void
i915InvalidateState(struct gl_context * ctx)
{
GLuint new_state = ctx->NewState;
_swrast_InvalidateState(ctx, new_state);
_swsetup_InvalidateState(ctx, new_state);
_tnl_InvalidateState(ctx, new_state);
_tnl_invalidate_vertex_state(ctx, new_state);
intel_context(ctx)->NewGLState |= new_state;
if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
_mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
/* Todo: gather state values under which tracked parameters become
* invalidated, add callbacks for things like
* ProgramLocalParameters, etc.
*/
{
struct i915_fragment_program *p =
(struct i915_fragment_program *) ctx->FragmentProgram._Current;
if (p && p->nr_params)
p->params_uptodate = 0;
}
if (new_state & (_NEW_STENCIL | _NEW_BUFFERS | _NEW_POLYGON))
i915_update_stencil(ctx);
if (new_state & (_NEW_LIGHT))
i915_update_provoking_vertex(ctx);
if (new_state & (_NEW_PROGRAM | _NEW_PROGRAM_CONSTANTS))
i915_update_program(ctx);
if (new_state & (_NEW_PROGRAM | _NEW_POINT))
i915_update_sprite_point_enable(ctx);
}
static void
i915InitDriverFunctions(struct dd_function_table *functions)
{
intelInitDriverFunctions(functions);
i915InitStateFunctions(functions);
i915InitFragProgFuncs(functions);
functions->UpdateState = i915InvalidateState;
}
/* Note: this is shared with i830. */
void
intel_init_texture_formats(struct gl_context *ctx)
{
struct intel_context *intel = intel_context(ctx);
struct intel_screen *intel_screen = intel->intelScreen;
ctx->TextureFormatSupported[MESA_FORMAT_B8G8R8A8_UNORM] = true;
if (intel_screen->deviceID != PCI_CHIP_I830_M &&
intel_screen->deviceID != PCI_CHIP_845_G)
ctx->TextureFormatSupported[MESA_FORMAT_B8G8R8X8_UNORM] = true;
if (intel->gen == 3)
ctx->TextureFormatSupported[MESA_FORMAT_B8G8R8A8_SRGB] = true;
ctx->TextureFormatSupported[MESA_FORMAT_B4G4R4A4_UNORM] = true;
ctx->TextureFormatSupported[MESA_FORMAT_B5G5R5A1_UNORM] = true;
ctx->TextureFormatSupported[MESA_FORMAT_B5G6R5_UNORM] = true;
ctx->TextureFormatSupported[MESA_FORMAT_L_UNORM8] = true;
if (intel->gen == 3)
ctx->TextureFormatSupported[MESA_FORMAT_A_UNORM8] = true;
ctx->TextureFormatSupported[MESA_FORMAT_I_UNORM8] = true;
ctx->TextureFormatSupported[MESA_FORMAT_LA_UNORM8] = true;
/* Depth and stencil */
if (intel->gen == 3) {
ctx->TextureFormatSupported[MESA_FORMAT_Z24_UNORM_S8_UINT] = true;
ctx->TextureFormatSupported[MESA_FORMAT_Z24_UNORM_X8_UINT] = true;
/*
* This was disabled in initial FBO enabling to avoid combinations
* of depth+stencil that wouldn't work together. We since decided
* that it was OK, since it's up to the app to come up with the
* combo that actually works, so this can probably be re-enabled.
*/
/*
ctx->TextureFormatSupported[MESA_FORMAT_Z_UNORM16] = true;
ctx->TextureFormatSupported[MESA_FORMAT_Z24] = true;
*/
}
/* ctx->Extensions.MESA_ycbcr_texture */
ctx->TextureFormatSupported[MESA_FORMAT_YCBCR] = true;
ctx->TextureFormatSupported[MESA_FORMAT_YCBCR_REV] = true;
/* GL_3DFX_texture_compression_FXT1 */
ctx->TextureFormatSupported[MESA_FORMAT_RGB_FXT1] = true;
ctx->TextureFormatSupported[MESA_FORMAT_RGBA_FXT1] = true;
/* GL_EXT_texture_compression_s3tc */
ctx->TextureFormatSupported[MESA_FORMAT_RGB_DXT1] = true;
ctx->TextureFormatSupported[MESA_FORMAT_RGBA_DXT1] = true;
ctx->TextureFormatSupported[MESA_FORMAT_RGBA_DXT3] = true;
ctx->TextureFormatSupported[MESA_FORMAT_RGBA_DXT5] = true;
}
extern const struct tnl_pipeline_stage *intel_pipeline[];
bool
i915CreateContext(int api,
const struct gl_config * mesaVis,
__DRIcontext * driContextPriv,
unsigned major_version,
unsigned minor_version,
uint32_t flags,
unsigned *error,
void *sharedContextPrivate)
{
struct dd_function_table functions;
struct i915_context *i915 = align_calloc(sizeof(struct i915_context), 16);
struct intel_context *intel = &i915->intel;
struct gl_context *ctx = &intel->ctx;
if (!i915) {
*error = __DRI_CTX_ERROR_NO_MEMORY;
return false;
}
i915InitVtbl(i915);
i915InitDriverFunctions(&functions);
if (!intelInitContext(intel, api, major_version, minor_version, flags,
mesaVis, driContextPriv,
sharedContextPrivate, &functions,
error)) {
align_free(i915);
return false;
}
intel_init_texture_formats(ctx);
_math_matrix_ctr(&intel->ViewportMatrix);
/* Initialize swrast, tnl driver tables: */
intelInitTriFuncs(ctx);
/* Install the customized pipeline: */
_tnl_destroy_pipeline(ctx);
_tnl_install_pipeline(ctx, intel_pipeline);
if (intel->no_rast)
FALLBACK(intel, INTEL_FALLBACK_USER, 1);
ctx->Const.MaxTextureUnits = I915_TEX_UNITS;
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = I915_TEX_UNITS;
ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = I915_TEX_UNITS;
ctx->Const.MaxTextureCoordUnits = I915_TEX_UNITS;
ctx->Const.MaxVarying = I915_TEX_UNITS;
ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents =
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = ctx->Const.MaxVarying * 4;
ctx->Const.MaxCombinedTextureImageUnits =
ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits +
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits;
/* Advertise the full hardware capabilities. The new memory
* manager should cope much better with overload situations:
*/
ctx->Const.MaxTextureSize = 2048;
ctx->Const.Max3DTextureLevels = 9;
ctx->Const.MaxCubeTextureLevels = 12;
ctx->Const.MaxTextureRectSize = (1 << 11);
ctx->Const.MaxTextureUnits = I915_TEX_UNITS;
ctx->Const.MaxTextureMaxAnisotropy = 4.0;
/* GL_ARB_fragment_program limits - don't think Mesa actually
* validates programs against these, and in any case one ARB
* instruction can translate to more than one HW instruction, so
* we'll still have to check and fallback each time.
*/
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = I915_MAX_TEMPORARY;
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 11; /* 8 tex, 2 color, fog */
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = I915_MAX_CONSTANT;
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = I915_MAX_ALU_INSN;
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = I915_MAX_TEX_INSN;
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = (I915_MAX_ALU_INSN +
I915_MAX_TEX_INSN);
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections =
I915_MAX_TEX_INDIRECT;
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0; /* I don't think we have one */
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);
/* i915 stores all values in single-precision floats. Values aren't set
* for other program targets because software is used for those targets.
*/
ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumFloat.RangeMin = 127;
ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumFloat.RangeMax = 127;
ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumFloat.Precision = 23;
ctx->Const.Program[MESA_SHADER_FRAGMENT].LowFloat = ctx->Const.Program[MESA_SHADER_FRAGMENT].HighFloat =
ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumFloat;
ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt.RangeMin = 24;
ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt.RangeMax = 24;
ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt.Precision = 0;
ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt =
ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt;
ctx->FragmentProgram._MaintainTexEnvProgram = true;
_mesa_reset_vertex_processing_mode(ctx);
/* FINISHME: Are there other options that should be enabled for software
* FINISHME: vertex shaders?
*/
ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectSampler =
true;
struct gl_shader_compiler_options *const fs_options =
& ctx->Const.ShaderCompilerOptions[MESA_SHADER_FRAGMENT];
fs_options->MaxIfDepth = 0;
fs_options->EmitNoPow = true;
fs_options->EmitNoMainReturn = true;
fs_options->EmitNoIndirectInput = true;
fs_options->EmitNoIndirectOutput = true;
fs_options->EmitNoIndirectUniform = true;
fs_options->EmitNoIndirectTemp = true;
fs_options->EmitNoIndirectSampler = true;
ctx->Const.MaxDrawBuffers = 1;
ctx->Const.QueryCounterBits.SamplesPassed = 0;
_tnl_init_vertices(ctx, ctx->Const.MaxArrayLockSize + 12,
36 * sizeof(GLfloat));
intel->verts = TNL_CONTEXT(ctx)->clipspace.vertex_buf;
i915InitState(i915);
/* Always enable pixel fog. Vertex fog using fog coord will conflict
* with fog code appended onto fragment program.
*/
_tnl_allow_vertex_fog(ctx, 0);
_tnl_allow_pixel_fog(ctx, 1);
_mesa_override_extensions(ctx);
_mesa_compute_version(ctx);
_mesa_initialize_dispatch_tables(ctx);
_mesa_initialize_vbo_vtxfmt(ctx);
return true;
}

View File

@@ -1,372 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef I915CONTEXT_INC
#define I915CONTEXT_INC
#include "intel_context.h"
#define I915_FALLBACK_TEXTURE 0x1000
#define I915_FALLBACK_COLORMASK 0x2000
#define I915_FALLBACK_STENCIL 0x4000
#define I915_FALLBACK_STIPPLE 0x8000
#define I915_FALLBACK_PROGRAM 0x10000
#define I915_FALLBACK_LOGICOP 0x20000
#define I915_FALLBACK_POLYGON_SMOOTH 0x40000
#define I915_FALLBACK_POINT_SMOOTH 0x80000
#define I915_FALLBACK_POINT_SPRITE_COORD_ORIGIN 0x100000
#define I915_FALLBACK_DRAW_OFFSET 0x200000
#define I915_FALLBACK_COORD_REPLACE 0x400000
#define I915_UPLOAD_CTX 0x1
#define I915_UPLOAD_BUFFERS 0x2
#define I915_UPLOAD_STIPPLE 0x4
#define I915_UPLOAD_PROGRAM 0x8
#define I915_UPLOAD_CONSTANTS 0x10
#define I915_UPLOAD_INVARIENT 0x40
#define I915_UPLOAD_DEFAULTS 0x80
#define I915_UPLOAD_RASTER_RULES 0x100
#define I915_UPLOAD_BLEND 0x200
#define I915_UPLOAD_TEX(i) (0x00010000<<(i))
#define I915_UPLOAD_TEX_ALL (0x00ff0000)
#define I915_UPLOAD_TEX_0_SHIFT 16
/* State structure offsets - these will probably disappear.
*/
#define I915_DESTREG_CBUFADDR0 0
#define I915_DESTREG_CBUFADDR1 1
#define I915_DESTREG_DBUFADDR0 3
#define I915_DESTREG_DBUFADDR1 4
#define I915_DESTREG_DV0 6
#define I915_DESTREG_DV1 7
#define I915_DESTREG_SR0 8
#define I915_DESTREG_SR1 9
#define I915_DESTREG_SR2 10
#define I915_DESTREG_SENABLE 11
#define I915_DESTREG_DRAWRECT0 12
#define I915_DESTREG_DRAWRECT1 13
#define I915_DESTREG_DRAWRECT2 14
#define I915_DESTREG_DRAWRECT3 15
#define I915_DESTREG_DRAWRECT4 16
#define I915_DESTREG_DRAWRECT5 17
#define I915_DEST_SETUP_SIZE 18
#define I915_CTXREG_STATE4 0
#define I915_CTXREG_LI 1
#define I915_CTXREG_LIS2 2
#define I915_CTXREG_LIS3 3
#define I915_CTXREG_LIS4 4
#define I915_CTXREG_LIS5 5
#define I915_CTXREG_LIS6 6
#define I915_CTXREG_BF_STENCIL_OPS 7
#define I915_CTXREG_BF_STENCIL_MASKS 8
#define I915_CTX_SETUP_SIZE 9
#define I915_BLENDREG_IAB 0
#define I915_BLENDREG_BLENDCOLOR0 1
#define I915_BLENDREG_BLENDCOLOR1 2
#define I915_BLEND_SETUP_SIZE 3
#define I915_STPREG_ST0 0
#define I915_STPREG_ST1 1
#define I915_STP_SETUP_SIZE 2
#define I915_TEXREG_MS3 1
#define I915_TEXREG_MS4 2
#define I915_TEXREG_SS2 3
#define I915_TEXREG_SS3 4
#define I915_TEXREG_SS4 5
#define I915_TEX_SETUP_SIZE 6
#define I915_DEFREG_C0 0
#define I915_DEFREG_C1 1
#define I915_DEFREG_S0 2
#define I915_DEFREG_S1 3
#define I915_DEFREG_Z0 4
#define I915_DEFREG_Z1 5
#define I915_DEF_SETUP_SIZE 6
enum {
I915_RASTER_RULES,
I915_RASTER_RULES_SETUP_SIZE,
};
#define I915_TEX_UNITS 8
#define I915_WPOS_TEX_INVALID 0xff
#define I915_MAX_CONSTANT 32
#define I915_CONSTANT_SIZE (2+(4*I915_MAX_CONSTANT))
#define I915_MAX_TEX_INDIRECT 4
#define I915_MAX_TEX_INSN 32
#define I915_MAX_ALU_INSN 64
#define I915_MAX_DECL_INSN 27
#define I915_MAX_TEMPORARY 16
#define I915_MAX_INSN (I915_MAX_DECL_INSN + \
I915_MAX_TEX_INSN + \
I915_MAX_ALU_INSN)
/* Maximum size of the program packet, which matches the limits on
* decl, tex, and ALU instructions.
*/
#define I915_PROGRAM_SIZE (I915_MAX_INSN * 3 + 1)
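Plugging in the limits above: I915_MAX_INSN = 27 + 32 + 64 = 123 instructions, so I915_PROGRAM_SIZE = 123 * 3 + 1 = 370 dwords -- three dwords per instruction plus the one-dword packet header.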
/* Hardware version of a parsed fragment program. "Derived" from the
* mesa fragment_program struct.
*/
struct i915_fragment_program
{
struct gl_program FragProg;
bool translated;
bool params_uptodate;
bool on_hardware;
bool error; /* If program is malformed for any reason. */
/** Record of which phases R registers were last written in. */
GLuint register_phases[16];
GLuint indirections;
GLuint nr_tex_indirect;
GLuint nr_tex_insn;
GLuint nr_alu_insn;
GLuint nr_decl_insn;
/* TODO: split between the stored representation of a program and
* the state used to build that representation.
*/
struct gl_context *ctx;
/* declarations contains the packet header. */
GLuint declarations[I915_MAX_DECL_INSN * 3 + 1];
GLuint program[(I915_MAX_TEX_INSN + I915_MAX_ALU_INSN) * 3];
GLfloat constant[I915_MAX_CONSTANT][4];
GLuint constant_flags[I915_MAX_CONSTANT];
GLuint nr_constants;
GLuint *csr; /* Cursor, points into program.
*/
GLuint *decl; /* Cursor, points into declarations.
*/
GLuint decl_s; /* flags for which s regs need to be decl'd */
GLuint decl_t; /* flags for which t regs need to be decl'd */
GLuint temp_flag; /* Tracks temporary regs which are in
* use.
*/
GLuint utemp_flag; /* Tracks TYPE_U temporary regs which are in
* use.
*/
/* Track which R registers are "live" for each instruction.
* A register is live between the time it's written to and the last time
* it's read. */
GLuint usedRegs[I915_MAX_INSN];
/* Helpers for i915_fragprog.c:
*/
uint8_t texcoord_mapping[I915_TEX_UNITS];
uint8_t wpos_tex;
bool depth_written;
struct
{
GLuint reg; /* Hardware constant idx */
const GLfloat *values; /* Pointer to tracked values */
} param[I915_MAX_CONSTANT];
GLuint nr_params;
};
struct i915_hw_state
{
GLuint Ctx[I915_CTX_SETUP_SIZE];
GLuint Blend[I915_BLEND_SETUP_SIZE];
GLuint Buffer[I915_DEST_SETUP_SIZE];
GLuint Stipple[I915_STP_SETUP_SIZE];
GLuint Defaults[I915_DEF_SETUP_SIZE];
GLuint RasterRules[I915_RASTER_RULES_SETUP_SIZE];
GLuint Tex[I915_TEX_UNITS][I915_TEX_SETUP_SIZE];
GLuint Constant[I915_CONSTANT_SIZE];
GLuint ConstantSize;
GLuint Program[I915_PROGRAM_SIZE];
GLuint ProgramSize;
/* Region pointers for relocation:
*/
struct intel_region *draw_region;
struct intel_region *depth_region;
/* struct intel_region *tex_region[I915_TEX_UNITS]; */
/* Regions aren't actually that appropriate here as the memory may
* be from a PBO or FBO. Will have to do this for draw and depth for
* FBO's...
*/
drm_intel_bo *tex_buffer[I915_TEX_UNITS];
GLuint tex_offset[I915_TEX_UNITS];
GLuint active; /* I915_UPLOAD_* */
GLuint emitted; /* I915_UPLOAD_* */
};
struct i915_context
{
struct intel_context intel;
GLuint lodbias_ss2[MAX_TEXTURE_UNITS];
struct i915_fragment_program *current_program;
drm_intel_bo *current_vb_bo;
unsigned int current_vertex_size;
struct i915_hw_state state;
uint32_t last_draw_offset;
GLuint last_sampler;
};
#define I915_STATECHANGE(i915, flag) \
do { \
INTEL_FIREVERTICES( &(i915)->intel ); \
(i915)->state.emitted &= ~(flag); \
} while (0)
#define I915_ACTIVESTATE(i915, flag, mode) \
do { \
INTEL_FIREVERTICES( &(i915)->intel ); \
if (mode) \
(i915)->state.active |= (flag); \
else \
(i915)->state.active &= ~(flag); \
} while (0)
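Typical call pattern for the two macros, sketched after the i830 texture update earlier in this diff (not code from this file):

/* Sketch: flush queued vertices, then either mark the unit's packet
 * for re-emission or stop emitting it altogether. */
static void example_update_tex(struct i915_context *i915, int unit, bool enabled)
{
   if (enabled) {
      I915_STATECHANGE(i915, I915_UPLOAD_TEX(unit));    /* resend packet */
      I915_ACTIVESTATE(i915, I915_UPLOAD_TEX(unit), true);
   } else {
      I915_ACTIVESTATE(i915, I915_UPLOAD_TEX(unit), false);
   }
}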
/*======================================================================
* i915_vtbl.c
*/
extern void i915InitVtbl(struct i915_context *i915);
extern void
i915_state_draw_region(struct intel_context *intel,
struct i915_hw_state *state,
struct intel_region *color_region,
struct intel_region *depth_region);
#define SZ_TO_HW(sz) ((sz-2)&0x3)
#define EMIT_SZ(sz) (EMIT_1F + (sz) - 1)
#define EMIT_ATTR( ATTR, STYLE, S4, SZ ) \
do { \
intel->vertex_attrs[intel->vertex_attr_count].attrib = (ATTR); \
intel->vertex_attrs[intel->vertex_attr_count].format = (STYLE); \
s4 |= S4; \
intel->vertex_attr_count++; \
offset += (SZ); \
} while (0)
#define EMIT_PAD( N ) \
do { \
intel->vertex_attrs[intel->vertex_attr_count].attrib = 0; \
intel->vertex_attrs[intel->vertex_attr_count].format = EMIT_PAD; \
intel->vertex_attrs[intel->vertex_attr_count].offset = (N); \
intel->vertex_attr_count++; \
offset += (N); \
} while (0)
/*======================================================================
* i915_context.c
*/
extern bool i915CreateContext(int api,
const struct gl_config * mesaVis,
__DRIcontext * driContextPriv,
unsigned major_version,
unsigned minor_version,
uint32_t flags,
unsigned *error,
void *sharedContextPrivate);
/*======================================================================
* i915_debug.c
*/
extern void i915_disassemble_program(const GLuint * program, GLuint sz);
extern void i915_print_ureg(const char *msg, GLuint ureg);
/*======================================================================
* i915_state.c
*/
extern void i915InitStateFunctions(struct dd_function_table *functions);
extern void i915InitState(struct i915_context *i915);
extern void i915_update_stencil(struct gl_context * ctx);
extern void i915_update_provoking_vertex(struct gl_context *ctx);
extern void i915_update_sprite_point_enable(struct gl_context *ctx);
/*======================================================================
* i915_tex.c
*/
extern void i915UpdateTextureState(struct intel_context *intel);
extern void i915InitTextureFuncs(struct dd_function_table *functions);
/*======================================================================
* i915_fragprog.c
*/
extern void i915ValidateFragmentProgram(struct i915_context *i915);
extern void i915InitFragProgFuncs(struct dd_function_table *functions);
/*======================================================================
* Inline conversion functions. These are better-typed than the
* macros used previously:
*/
static inline struct i915_context *
i915_context(struct gl_context * ctx)
{
return (struct i915_context *) ctx;
}
#define I915_CONTEXT(ctx) i915_context(ctx)
#endif

View File

@@ -1,39 +0,0 @@
/**************************************************************************
*
* Copyright 2007 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/* Authors: Keith Whitwell <keithw@vmware.com>
*/
#ifndef I915_DEBUG_H
#define I915_DEBUG_H
struct i915_context;
extern void i915_disassemble_program(const unsigned *program, unsigned sz);
extern void i915_print_ureg(const char *msg, unsigned ureg);
#endif

View File

@@ -1,330 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <stdio.h>
#include <assert.h>
#include "main/glheader.h"
#include "i915_reg.h"
#include "i915_debug.h"
#include "main/glheader.h"
static const char *opcodes[0x20] = {
"NOP",
"ADD",
"MOV",
"MUL",
"MAD",
"DP2ADD",
"DP3",
"DP4",
"FRC",
"RCP",
"RSQ",
"EXP",
"LOG",
"CMP",
"MIN",
"MAX",
"FLR",
"MOD",
"TRC",
"SGE",
"SLT",
"TEXLD",
"TEXLDP",
"TEXLDB",
"TEXKILL",
"DCL",
"0x1a",
"0x1b",
"0x1c",
"0x1d",
"0x1e",
"0x1f",
};
static const int args[0x20] = {
0, /* 0 nop */
2, /* 1 add */
1, /* 2 mov */
2, /* 3 mul */
3, /* 4 mad */
3, /* 5 dp2add */
2, /* 6 dp3 */
2, /* 7 dp4 */
1, /* 8 frc */
1, /* 9 rcp */
1, /* a rsq */
1, /* b exp */
1, /* c log */
3, /* d cmp */
2, /* e min */
2, /* f max */
1, /* 10 flr */
1, /* 11 mod */
1, /* 12 trc */
2, /* 13 sge */
2, /* 14 slt */
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
};
static const char *regname[0x8] = {
"R",
"T",
"CONST",
"S",
"OC",
"OD",
"U",
"UNKNOWN",
};
static void
print_reg_type_nr(GLuint type, GLuint nr)
{
switch (type) {
case REG_TYPE_T:
switch (nr) {
case T_DIFFUSE:
printf("T_DIFFUSE");
return;
case T_SPECULAR:
printf("T_SPECULAR");
return;
case T_FOG_W:
printf("T_FOG_W");
return;
default:
printf("T_TEX%d", nr);
return;
}
case REG_TYPE_OC:
if (nr == 0) {
printf("oC");
return;
}
break;
case REG_TYPE_OD:
if (nr == 0) {
printf("oD");
return;
}
break;
default:
break;
}
printf("%s[%d]", regname[type], nr);
}
#define REG_SWIZZLE_MASK 0x7777
#define REG_NEGATE_MASK 0x8888
#define REG_SWIZZLE_XYZW ((SRC_X << A2_SRC2_CHANNEL_X_SHIFT) | \
(SRC_Y << A2_SRC2_CHANNEL_Y_SHIFT) | \
(SRC_Z << A2_SRC2_CHANNEL_Z_SHIFT) | \
(SRC_W << A2_SRC2_CHANNEL_W_SHIFT))
static void
print_reg_neg_swizzle(GLuint reg)
{
int i;
if ((reg & REG_SWIZZLE_MASK) == REG_SWIZZLE_XYZW &&
(reg & REG_NEGATE_MASK) == 0)
return;
printf(".");
for (i = 3; i >= 0; i--) {
if (reg & (1 << ((i * 4) + 3)))
printf("-");
switch ((reg >> (i * 4)) & 0x7) {
case 0:
printf("x");
break;
case 1:
printf("y");
break;
case 2:
printf("z");
break;
case 3:
printf("w");
break;
case 4:
printf("0");
break;
case 5:
printf("1");
break;
default:
printf("?");
break;
}
}
}
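A worked decode for the routine above (assuming the usual SRC_X..SRC_W channel codes 0-3, with the x field in the top nibble):

/* reg = 0x8451, scanned from the top nibble (x) down to the low one (w):
 *
 *   x: 0x8 -> negate bit set, select 0 (x) -> "-x"
 *   y: 0x4 -> select 4                     -> "0"
 *   z: 0x5 -> select 5                     -> "1"
 *   w: 0x1 -> select 1 (y)                 -> "y"
 *
 * so print_reg_neg_swizzle() emits ".-x01y". */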
static void
print_src_reg(GLuint dword)
{
GLuint nr = (dword >> A2_SRC2_NR_SHIFT) & REG_NR_MASK;
GLuint type = (dword >> A2_SRC2_TYPE_SHIFT) & REG_TYPE_MASK;
print_reg_type_nr(type, nr);
print_reg_neg_swizzle(dword);
}
static void
print_dest_reg(GLuint dword)
{
GLuint nr = (dword >> A0_DEST_NR_SHIFT) & REG_NR_MASK;
GLuint type = (dword >> A0_DEST_TYPE_SHIFT) & REG_TYPE_MASK;
print_reg_type_nr(type, nr);
if ((dword & A0_DEST_CHANNEL_ALL) == A0_DEST_CHANNEL_ALL)
return;
printf(".");
if (dword & A0_DEST_CHANNEL_X)
printf("x");
if (dword & A0_DEST_CHANNEL_Y)
printf("y");
if (dword & A0_DEST_CHANNEL_Z)
printf("z");
if (dword & A0_DEST_CHANNEL_W)
printf("w");
}
#define GET_SRC0_REG(r0, r1) ((r0<<14)|(r1>>A1_SRC0_CHANNEL_W_SHIFT))
#define GET_SRC1_REG(r0, r1) ((r0<<8)|(r1>>A2_SRC1_CHANNEL_W_SHIFT))
#define GET_SRC2_REG(r) (r)
static void
print_arith_op(GLuint opcode, const GLuint * program)
{
if (opcode != A0_NOP) {
print_dest_reg(program[0]);
if (program[0] & A0_DEST_SATURATE)
printf(" = SATURATE ");
else
printf(" = ");
}
printf("%s ", opcodes[opcode]);
print_src_reg(GET_SRC0_REG(program[0], program[1]));
if (args[opcode] == 1) {
printf("\n");
return;
}
printf(", ");
print_src_reg(GET_SRC1_REG(program[1], program[2]));
if (args[opcode] == 2) {
printf("\n");
return;
}
printf(", ");
print_src_reg(GET_SRC2_REG(program[2]));
printf("\n");
return;
}
static void
print_tex_op(GLuint opcode, const GLuint * program)
{
print_dest_reg(program[0] | A0_DEST_CHANNEL_ALL);
printf(" = ");
printf("%s ", opcodes[opcode]);
printf("S[%d],", program[0] & T0_SAMPLER_NR_MASK);
print_reg_type_nr((program[1] >> T1_ADDRESS_REG_TYPE_SHIFT) &
REG_TYPE_MASK,
(program[1] >> T1_ADDRESS_REG_NR_SHIFT) & REG_NR_MASK);
printf("\n");
}
static void
print_dcl_op(GLuint opcode, const GLuint * program)
{
printf("%s ", opcodes[opcode]);
print_dest_reg(program[0] | A0_DEST_CHANNEL_ALL);
printf("\n");
}
void
i915_disassemble_program(const GLuint * program, GLuint sz)
{
GLint i;
printf("\t\tBEGIN\n");
assert((program[0] & 0x1ff) + 2 == sz);
program++;
for (i = 1; i < sz; i += 3, program += 3) {
GLuint opcode = program[0] & (0x1f << 24);
printf("\t\t");
if ((GLint) opcode >= A0_NOP && opcode <= A0_SLT)
print_arith_op(opcode >> 24, program);
else if (opcode >= T0_TEXLD && opcode <= T0_TEXKILL)
print_tex_op(opcode >> 24, program);
else if (opcode == D0_DCL)
print_dcl_op(opcode >> 24, program);
else
printf("Unknown opcode 0x%x\n", opcode);
}
printf("\t\tEND\n\n");
}
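The loop above assumes the fixed program-packet layout; summarized:

/* One dword of packet header, then three dwords per instruction:
 *
 *   dword 0       header; low 9 bits = total length - 2,
 *                 hence assert((program[0] & 0x1ff) + 2 == sz)
 *   dwords 1..3   instruction 0 (A0/A1/A2, T0/T1/T2, or D0/D1/D2)
 *   dwords 4..6   instruction 1
 *   ...
 *
 * which is why the loop advances i and program by 3 each step. */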

File diff suppressed because it is too large

View File

@@ -1,588 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <strings.h>
#include "main/glheader.h"
#include "main/macros.h"
#include "main/enums.h"
#include "tnl/t_context.h"
#include "intel_batchbuffer.h"
#include "i915_reg.h"
#include "i915_context.h"
#include "i915_program.h"
#define A0_DEST( reg ) (((reg)&UREG_TYPE_NR_MASK)>>UREG_A0_DEST_SHIFT_LEFT)
#define D0_DEST( reg ) (((reg)&UREG_TYPE_NR_MASK)>>UREG_A0_DEST_SHIFT_LEFT)
#define T0_DEST( reg ) (((reg)&UREG_TYPE_NR_MASK)>>UREG_A0_DEST_SHIFT_LEFT)
#define A0_SRC0( reg ) (((reg)&UREG_MASK)>>UREG_A0_SRC0_SHIFT_LEFT)
#define A1_SRC0( reg ) (((reg)&UREG_MASK)<<UREG_A1_SRC0_SHIFT_RIGHT)
#define A1_SRC1( reg ) (((reg)&UREG_MASK)>>UREG_A1_SRC1_SHIFT_LEFT)
#define A2_SRC1( reg ) (((reg)&UREG_MASK)<<UREG_A2_SRC1_SHIFT_RIGHT)
#define A2_SRC2( reg ) (((reg)&UREG_MASK)>>UREG_A2_SRC2_SHIFT_LEFT)
/* These are special, and don't have swizzle/negate bits.
*/
#define T0_SAMPLER( reg ) (GET_UREG_NR(reg)<<T0_SAMPLER_NR_SHIFT)
#define T1_ADDRESS_REG( reg ) ((GET_UREG_NR(reg)<<T1_ADDRESS_REG_NR_SHIFT) | \
(GET_UREG_TYPE(reg)<<T1_ADDRESS_REG_TYPE_SHIFT))
/* Macros for translating UREG's into the various register fields used
* by the I915 programmable unit.
*/
#define UREG_A0_DEST_SHIFT_LEFT (UREG_TYPE_SHIFT - A0_DEST_TYPE_SHIFT)
#define UREG_A0_SRC0_SHIFT_LEFT (UREG_TYPE_SHIFT - A0_SRC0_TYPE_SHIFT)
#define UREG_A1_SRC0_SHIFT_RIGHT (A1_SRC0_CHANNEL_W_SHIFT - UREG_CHANNEL_W_SHIFT)
#define UREG_A1_SRC1_SHIFT_LEFT (UREG_TYPE_SHIFT - A1_SRC1_TYPE_SHIFT)
#define UREG_A2_SRC1_SHIFT_RIGHT (A2_SRC1_CHANNEL_W_SHIFT - UREG_CHANNEL_W_SHIFT)
#define UREG_A2_SRC2_SHIFT_LEFT (UREG_TYPE_SHIFT - A2_SRC2_TYPE_SHIFT)
#define UREG_MASK 0xffffff00
#define UREG_TYPE_NR_MASK ((REG_TYPE_MASK << UREG_TYPE_SHIFT) | \
(REG_NR_MASK << UREG_NR_SHIFT))
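The shift macros above all implement one idea: a "ureg" packs register type, number, and per-channel swizzle into a single dword once, and each A0_/A1_/A2_ macro re-shifts that dword so the fields land where the three instruction dwords expect them. A miniature of the scheme, with field positions invented for illustration:

/* Toy version: because type and nr keep the same relative spacing in
 * the packed word as in the target field, one masked shift moves both
 * at once -- the trick behind UREG_A0_DEST_SHIFT_LEFT above. */
#define TOY_TYPE_SHIFT 28
#define TOY_NR_SHIFT   24
#define TOY_UREG(type, nr) (((type) << TOY_TYPE_SHIFT) | ((nr) << TOY_NR_SHIFT))

#define TOY_DEST_TYPE_SHIFT 12
#define TOY_DEST_NR_SHIFT    8   /* same 4-bit gap between type and nr */
#define TOY_TYPE_NR_MASK ((0xfu << TOY_TYPE_SHIFT) | (0xfu << TOY_NR_SHIFT))
#define TOY_DEST(ureg) \
   (((ureg) & TOY_TYPE_NR_MASK) >> (TOY_TYPE_SHIFT - TOY_DEST_TYPE_SHIFT))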
#define I915_CONSTFLAG_PARAM 0x1f
GLuint
i915_get_temp(struct i915_fragment_program *p)
{
int bit = ffs(~p->temp_flag);
if (!bit) {
fprintf(stderr, "%s: out of temporaries\n", __FILE__);
exit(1);
}
p->temp_flag |= 1 << (bit - 1);
return UREG(REG_TYPE_R, (bit - 1));
}
GLuint
i915_get_utemp(struct i915_fragment_program * p)
{
int bit = ffs(~p->utemp_flag);
if (!bit) {
fprintf(stderr, "%s: out of temporaries\n", __FILE__);
exit(1);
}
p->utemp_flag |= 1 << (bit - 1);
return UREG(REG_TYPE_U, (bit - 1));
}
void
i915_release_utemps(struct i915_fragment_program *p)
{
p->utemp_flag = ~0x7;
}
GLuint
i915_emit_decl(struct i915_fragment_program *p,
GLuint type, GLuint nr, GLuint d0_flags)
{
GLuint reg = UREG(type, nr);
if (type == REG_TYPE_T) {
if (p->decl_t & (1 << nr))
return reg;
p->decl_t |= (1 << nr);
}
else if (type == REG_TYPE_S) {
if (p->decl_s & (1 << nr))
return reg;
p->decl_s |= (1 << nr);
}
else
return reg;
*(p->decl++) = (D0_DCL | D0_DEST(reg) | d0_flags);
*(p->decl++) = D1_MBZ;
*(p->decl++) = D2_MBZ;
assert(p->decl <= p->declarations + ARRAY_SIZE(p->declarations));
p->nr_decl_insn++;
return reg;
}
GLuint
i915_emit_arith(struct i915_fragment_program * p,
GLuint op,
GLuint dest,
GLuint mask,
GLuint saturate, GLuint src0, GLuint src1, GLuint src2)
{
GLuint c[3];
GLuint nr_const = 0;
assert(GET_UREG_TYPE(dest) != REG_TYPE_CONST);
dest = UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest));
assert(dest);
if (GET_UREG_TYPE(src0) == REG_TYPE_CONST)
c[nr_const++] = 0;
if (GET_UREG_TYPE(src1) == REG_TYPE_CONST)
c[nr_const++] = 1;
if (GET_UREG_TYPE(src2) == REG_TYPE_CONST)
c[nr_const++] = 2;
/* Recursively call this function to MOV additional const values
* into temporary registers. Use utemp registers for this -
* currently shouldn't be possible to run out, but keep an eye on
* this.
*/
if (nr_const > 1) {
GLuint s[3], first, i, old_utemp_flag;
s[0] = src0;
s[1] = src1;
s[2] = src2;
old_utemp_flag = p->utemp_flag;
first = GET_UREG_NR(s[c[0]]);
for (i = 1; i < nr_const; i++) {
if (GET_UREG_NR(s[c[i]]) != first) {
GLuint tmp = i915_get_utemp(p);
i915_emit_arith(p, A0_MOV, tmp, A0_DEST_CHANNEL_ALL, 0,
s[c[i]], 0, 0);
s[c[i]] = tmp;
}
}
src0 = s[0];
src1 = s[1];
src2 = s[2];
p->utemp_flag = old_utemp_flag; /* restore */
}
if (p->csr >= p->program + ARRAY_SIZE(p->program)) {
i915_program_error(p, "Program contains too many instructions");
return UREG_BAD;
}
*(p->csr++) = (op | A0_DEST(dest) | mask | saturate | A0_SRC0(src0));
*(p->csr++) = (A1_SRC0(src0) | A1_SRC1(src1));
*(p->csr++) = (A2_SRC1(src1) | A2_SRC2(src2));
if (GET_UREG_TYPE(dest) == REG_TYPE_R)
p->register_phases[GET_UREG_NR(dest)] = p->nr_tex_indirect;
p->nr_alu_insn++;
return dest;
}
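/* Usage sketch (hypothetical operands a, b, c): a saturated
* multiply-add "r = a * b + c" would be emitted as:
*
*   GLuint r = i915_get_temp(p);
*   i915_emit_arith(p, A0_MAD, r, A0_DEST_CHANNEL_ALL,
*                   A0_DEST_SATURATE, a, b, c);
*/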
static GLuint get_free_rreg (struct i915_fragment_program *p,
GLuint live_regs)
{
int bit = ffs(~live_regs);
if (!bit) {
i915_program_error(p, "Can't find free R reg");
return UREG_BAD;
}
return UREG(REG_TYPE_R, bit - 1);
}
GLuint i915_emit_texld( struct i915_fragment_program *p,
GLuint live_regs,
GLuint dest,
GLuint destmask,
GLuint sampler,
GLuint coord,
GLuint op )
{
if (coord != UREG(GET_UREG_TYPE(coord), GET_UREG_NR(coord))) {
/* With the help of the "needed registers" table created earlier, pick
* a register we can MOV the swizzled TC to (since TEX doesn't support
* swizzled sources) */
GLuint swizCoord = get_free_rreg(p, live_regs);
if (swizCoord == UREG_BAD)
return 0;
i915_emit_arith( p, A0_MOV, swizCoord, A0_DEST_CHANNEL_ALL, 0, coord, 0, 0 );
coord = swizCoord;
}
/* Don't worry about saturate as we only support texture formats
* that are always in the 0..1 range.
*/
if (destmask != A0_DEST_CHANNEL_ALL) {
GLuint tmp = i915_get_utemp(p);
i915_emit_texld( p, 0, tmp, A0_DEST_CHANNEL_ALL, sampler, coord, op );
i915_emit_arith( p, A0_MOV, dest, destmask, 0, tmp, 0, 0 );
return dest;
}
else {
assert(GET_UREG_TYPE(dest) != REG_TYPE_CONST);
assert(dest == UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest)));
/* Can't use unsaved temps for coords, as the phase boundary would result
* in the contents becoming undefined.
*/
assert(GET_UREG_TYPE(coord) != REG_TYPE_U);
if ((GET_UREG_TYPE(coord) != REG_TYPE_R) &&
(GET_UREG_TYPE(coord) != REG_TYPE_OC) &&
(GET_UREG_TYPE(coord) != REG_TYPE_OD) &&
(GET_UREG_TYPE(coord) != REG_TYPE_T)) {
GLuint tmpCoord = get_free_rreg(p, live_regs);
if (tmpCoord == UREG_BAD)
return 0;
i915_emit_arith(p, A0_MOV, tmpCoord, A0_DEST_CHANNEL_ALL, 0, coord, 0, 0);
coord = tmpCoord;
}
/* Output register being oC or oD defines a phase boundary */
if (GET_UREG_TYPE(dest) == REG_TYPE_OC ||
GET_UREG_TYPE(dest) == REG_TYPE_OD)
p->nr_tex_indirect++;
/* Reading from an r# register whose contents depend on output of the
* current phase defines a phase boundary.
*/
if (GET_UREG_TYPE(coord) == REG_TYPE_R &&
p->register_phases[GET_UREG_NR(coord)] == p->nr_tex_indirect)
p->nr_tex_indirect++;
if (p->csr >= p->program + ARRAY_SIZE(p->program)) {
i915_program_error(p, "Program contains too many instructions");
return UREG_BAD;
}
*(p->csr++) = (op |
T0_DEST( dest ) |
T0_SAMPLER( sampler ));
*(p->csr++) = T1_ADDRESS_REG( coord );
*(p->csr++) = T2_MBZ;
if (GET_UREG_TYPE(dest) == REG_TYPE_R)
p->register_phases[GET_UREG_NR(dest)] = p->nr_tex_indirect;
p->nr_tex_insn++;
return dest;
}
}
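/* Usage sketch (hypothetical): sampling 2D sampler 0 at texcoord set 0
* into a fresh temporary might look like:
*
*   GLuint sampler = i915_emit_decl(p, REG_TYPE_S, 0, D0_SAMPLE_TYPE_2D);
*   GLuint coord = i915_emit_decl(p, REG_TYPE_T, T_TEX0, D0_CHANNEL_ALL);
*   GLuint dst = i915_get_temp(p);
*   i915_emit_texld(p, 0, dst, A0_DEST_CHANNEL_ALL, sampler, coord,
*                   T0_TEXLD);
*/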
GLuint
i915_emit_const1f(struct i915_fragment_program * p, GLfloat c0)
{
GLint reg, idx;
if (c0 == 0.0)
return swizzle(UREG(REG_TYPE_R, 0), ZERO, ZERO, ZERO, ZERO);
if (c0 == 1.0)
return swizzle(UREG(REG_TYPE_R, 0), ONE, ONE, ONE, ONE);
for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
if (p->constant_flags[reg] == I915_CONSTFLAG_PARAM)
continue;
for (idx = 0; idx < 4; idx++) {
if (!(p->constant_flags[reg] & (1 << idx)) ||
p->constant[reg][idx] == c0) {
p->constant[reg][idx] = c0;
p->constant_flags[reg] |= 1 << idx;
if (reg + 1 > p->nr_constants)
p->nr_constants = reg + 1;
return swizzle(UREG(REG_TYPE_CONST, reg), idx, ZERO, ZERO, ONE);
}
}
}
fprintf(stderr, "%s: out of constants\n", __func__);
p->error = 1;
return 0;
}
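/* Worked example: with an empty constant file, i915_emit_const1f(p, 0.5)
* stores 0.5 in constant[0].x, marks that channel used, and returns the
* swizzled ureg (c0.x, 0, 0, 1).
*/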
GLuint
i915_emit_const2f(struct i915_fragment_program * p, GLfloat c0, GLfloat c1)
{
GLint reg, idx;
if (c0 == 0.0)
return swizzle(i915_emit_const1f(p, c1), ZERO, X, Z, W);
if (c0 == 1.0)
return swizzle(i915_emit_const1f(p, c1), ONE, X, Z, W);
if (c1 == 0.0)
return swizzle(i915_emit_const1f(p, c0), X, ZERO, Z, W);
if (c1 == 1.0)
return swizzle(i915_emit_const1f(p, c0), X, ONE, Z, W);
for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
if (p->constant_flags[reg] == 0xf ||
p->constant_flags[reg] == I915_CONSTFLAG_PARAM)
continue;
for (idx = 0; idx < 3; idx++) {
if (!(p->constant_flags[reg] & (3 << idx))) {
p->constant[reg][idx] = c0;
p->constant[reg][idx + 1] = c1;
p->constant_flags[reg] |= 3 << idx;
if (reg + 1 > p->nr_constants)
p->nr_constants = reg + 1;
return swizzle(UREG(REG_TYPE_CONST, reg), idx, idx + 1, ZERO,
ONE);
}
}
}
fprintf(stderr, "%s: out of constants\n", __func__);
p->error = 1;
return 0;
}
GLuint
i915_emit_const4f(struct i915_fragment_program * p,
GLfloat c0, GLfloat c1, GLfloat c2, GLfloat c3)
{
GLint reg;
for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
if (p->constant_flags[reg] == 0xf &&
p->constant[reg][0] == c0 &&
p->constant[reg][1] == c1 &&
p->constant[reg][2] == c2 && p->constant[reg][3] == c3) {
return UREG(REG_TYPE_CONST, reg);
}
else if (p->constant_flags[reg] == 0) {
p->constant[reg][0] = c0;
p->constant[reg][1] = c1;
p->constant[reg][2] = c2;
p->constant[reg][3] = c3;
p->constant_flags[reg] = 0xf;
if (reg + 1 > p->nr_constants)
p->nr_constants = reg + 1;
return UREG(REG_TYPE_CONST, reg);
}
}
fprintf(stderr, "%s: out of constants\n", __func__);
p->error = 1;
return 0;
}
GLuint
i915_emit_const4fv(struct i915_fragment_program * p, const GLfloat * c)
{
return i915_emit_const4f(p, c[0], c[1], c[2], c[3]);
}
GLuint
i915_emit_param4fv(struct i915_fragment_program * p, const GLfloat * values)
{
GLint reg, i;
for (i = 0; i < p->nr_params; i++) {
if (p->param[i].values == values)
return UREG(REG_TYPE_CONST, p->param[i].reg);
}
for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
if (p->constant_flags[reg] == 0) {
p->constant_flags[reg] = I915_CONSTFLAG_PARAM;
i = p->nr_params++;
p->param[i].values = values;
p->param[i].reg = reg;
p->params_uptodate = 0;
if (reg + 1 > p->nr_constants)
p->nr_constants = reg + 1;
return UREG(REG_TYPE_CONST, reg);
}
}
fprintf(stderr, "%s: out of constants\n", __func__);
p->error = 1;
return 0;
}
/* Warning the user about program errors has proven quite valuable, judging
* from our bug reports. Unfortunately it also means piglit reports errors
* when we fall back to software due to an unsupportable program.
*/
void
i915_program_error(struct i915_fragment_program *p, const char *fmt, ...)
{
if (unlikely((INTEL_DEBUG & (DEBUG_WM | DEBUG_PERF)) != 0)) {
va_list args;
fprintf(stderr, "i915_program_error: ");
va_start(args, fmt);
vfprintf(stderr, fmt, args);
va_end(args);
fprintf(stderr, "\n");
}
p->error = 1;
}
void
i915_init_program(struct i915_context *i915, struct i915_fragment_program *p)
{
struct gl_context *ctx = &i915->intel.ctx;
p->translated = 0;
p->params_uptodate = 0;
p->on_hardware = 0;
p->error = 0;
memset(&p->register_phases, 0, sizeof(p->register_phases));
p->nr_tex_indirect = 1;
p->nr_tex_insn = 0;
p->nr_alu_insn = 0;
p->nr_decl_insn = 0;
p->ctx = ctx;
memset(p->constant_flags, 0, sizeof(p->constant_flags));
p->nr_constants = 0;
p->csr = p->program;
p->decl = p->declarations;
p->decl_s = 0;
p->decl_t = 0;
p->temp_flag = 0xffff000;
p->utemp_flag = ~0x7;
p->wpos_tex = I915_WPOS_TEX_INVALID;
p->depth_written = 0;
p->nr_params = 0;
*(p->decl++) = _3DSTATE_PIXEL_SHADER_PROGRAM;
}
void
i915_fini_program(struct i915_fragment_program *p)
{
GLuint program_size = p->csr - p->program;
GLuint decl_size = p->decl - p->declarations;
if (p->nr_tex_indirect > I915_MAX_TEX_INDIRECT) {
i915_program_error(p, "Exceeded max nr indirect texture lookups "
"(%d out of %d)",
p->nr_tex_indirect, I915_MAX_TEX_INDIRECT);
}
if (p->nr_tex_insn > I915_MAX_TEX_INSN) {
i915_program_error(p, "Exceeded max TEX instructions (%d out of %d)",
p->nr_tex_insn, I915_MAX_TEX_INSN);
}
if (p->nr_alu_insn > I915_MAX_ALU_INSN)
i915_program_error(p, "Exceeded max ALU instructions (%d out of %d)",
p->nr_alu_insn, I915_MAX_ALU_INSN);
if (p->nr_decl_insn > I915_MAX_DECL_INSN) {
i915_program_error(p, "Exceeded max DECL instructions (%d out of %d)",
p->nr_decl_insn, I915_MAX_DECL_INSN);
}
if (p->error) {
p->FragProg.arb.NumNativeInstructions = 0;
p->FragProg.arb.NumNativeAluInstructions = 0;
p->FragProg.arb.NumNativeTexInstructions = 0;
p->FragProg.arb.NumNativeTexIndirections = 0;
}
else {
p->FragProg.arb.NumNativeInstructions = (p->nr_alu_insn +
p->nr_tex_insn +
p->nr_decl_insn);
p->FragProg.arb.NumNativeAluInstructions = p->nr_alu_insn;
p->FragProg.arb.NumNativeTexInstructions = p->nr_tex_insn;
p->FragProg.arb.NumNativeTexIndirections = p->nr_tex_indirect;
}
p->declarations[0] |= program_size + decl_size - 2;
}
void
i915_upload_program(struct i915_context *i915,
struct i915_fragment_program *p)
{
GLuint program_size = p->csr - p->program;
GLuint decl_size = p->decl - p->declarations;
if (p->error)
return;
/* Could just go straight to the batchbuffer from here:
*/
if (i915->state.ProgramSize != (program_size + decl_size) ||
memcmp(i915->state.Program + decl_size, p->program,
program_size * sizeof(int)) != 0) {
I915_STATECHANGE(i915, I915_UPLOAD_PROGRAM);
memcpy(i915->state.Program, p->declarations, decl_size * sizeof(int));
memcpy(i915->state.Program + decl_size, p->program,
program_size * sizeof(int));
i915->state.ProgramSize = decl_size + program_size;
}
/* Always seemed to get a failure if I used memcmp() to
* short-circuit this state upload. Needs further investigation?
*/
if (p->nr_constants) {
GLuint nr = p->nr_constants;
I915_ACTIVESTATE(i915, I915_UPLOAD_CONSTANTS, 1);
I915_STATECHANGE(i915, I915_UPLOAD_CONSTANTS);
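/* Constant[1] below is the valid-register mask: e.g. nr == 3 yields
* (1 << 2) | 3 == 0x7, covering c0..c2.
*/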
i915->state.Constant[0] = _3DSTATE_PIXEL_SHADER_CONSTANTS | ((nr) * 4);
i915->state.Constant[1] = (1 << (nr - 1)) | ((1 << (nr - 1)) - 1);
memcpy(&i915->state.Constant[2], p->constant, 4 * sizeof(int) * (nr));
i915->state.ConstantSize = 2 + (nr) * 4;
if (0) {
GLuint i;
for (i = 0; i < nr; i++) {
fprintf(stderr, "const[%d]: %f %f %f %f\n", i,
p->constant[i][0],
p->constant[i][1], p->constant[i][2], p->constant[i][3]);
}
}
}
else {
I915_ACTIVESTATE(i915, I915_UPLOAD_CONSTANTS, 0);
}
p->on_hardware = 1;
}

View File

@ -1,160 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef I915_PROGRAM_H
#define I915_PROGRAM_H
#include "i915_context.h"
#include "i915_reg.h"
/* Having zero and one in here makes the definition of swizzle a lot
* easier.
*/
#define UREG_TYPE_SHIFT 29
#define UREG_NR_SHIFT 24
#define UREG_CHANNEL_X_NEGATE_SHIFT 23
#define UREG_CHANNEL_X_SHIFT 20
#define UREG_CHANNEL_Y_NEGATE_SHIFT 19
#define UREG_CHANNEL_Y_SHIFT 16
#define UREG_CHANNEL_Z_NEGATE_SHIFT 15
#define UREG_CHANNEL_Z_SHIFT 12
#define UREG_CHANNEL_W_NEGATE_SHIFT 11
#define UREG_CHANNEL_W_SHIFT 8
#define UREG_CHANNEL_ZERO_NEGATE_MBZ 5
#define UREG_CHANNEL_ZERO_SHIFT 4
#define UREG_CHANNEL_ONE_NEGATE_MBZ 1
#define UREG_CHANNEL_ONE_SHIFT 0
#define UREG_BAD 0xffffffff /* not a valid ureg */
#define X SRC_X
#define Y SRC_Y
#define Z SRC_Z
#define W SRC_W
#define ZERO SRC_ZERO
#define ONE SRC_ONE
/* Construct a ureg:
*/
#define UREG( type, nr ) (((type)<< UREG_TYPE_SHIFT) | \
((nr) << UREG_NR_SHIFT) | \
(X << UREG_CHANNEL_X_SHIFT) | \
(Y << UREG_CHANNEL_Y_SHIFT) | \
(Z << UREG_CHANNEL_Z_SHIFT) | \
(W << UREG_CHANNEL_W_SHIFT) | \
(ZERO << UREG_CHANNEL_ZERO_SHIFT) | \
(ONE << UREG_CHANNEL_ONE_SHIFT))
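/* Worked example: UREG(REG_TYPE_R, 2) packs type 0 and nr 2 together
* with the identity .xyzw swizzle, so GET_UREG_TYPE()/GET_UREG_NR()
* below recover R2 unchanged.
*/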
#define GET_CHANNEL_SRC( reg, channel ) ((reg<<(channel*4)) & (0xf<<20))
#define CHANNEL_SRC( src, channel ) (src>>(channel*4))
#define GET_UREG_TYPE(reg) (((reg)>>UREG_TYPE_SHIFT)&REG_TYPE_MASK)
#define GET_UREG_NR(reg) (((reg)>>UREG_NR_SHIFT)&REG_NR_MASK)
#define UREG_XYZW_CHANNEL_MASK 0x00ffff00
/* One neat thing about the UREG representation:
*/
static inline int
swizzle(int reg, int x, int y, int z, int w)
{
return ((reg & ~UREG_XYZW_CHANNEL_MASK) |
CHANNEL_SRC(GET_CHANNEL_SRC(reg, x), 0) |
CHANNEL_SRC(GET_CHANNEL_SRC(reg, y), 1) |
CHANNEL_SRC(GET_CHANNEL_SRC(reg, z), 2) |
CHANNEL_SRC(GET_CHANNEL_SRC(reg, w), 3));
}
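/* e.g. swizzle(reg, W, Z, Y, X) reverses the component order, and
* swizzle(reg, X, X, X, X) replicates the x channel, since each 4-bit
* channel-select field is rewritten independently.
*/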
/* Another neat thing about the UREG representation:
*/
static inline int
negate(int reg, int x, int y, int z, int w)
{
return reg ^ (((x & 1) << UREG_CHANNEL_X_NEGATE_SHIFT) |
((y & 1) << UREG_CHANNEL_Y_NEGATE_SHIFT) |
((z & 1) << UREG_CHANNEL_Z_NEGATE_SHIFT) |
((w & 1) << UREG_CHANNEL_W_NEGATE_SHIFT));
}
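/* e.g. negate(reg, 1, 1, 1, 1) flips the sign on every channel, while
* negate(reg, 0, 0, 0, 0) is a no-op, since the XOR only touches the
* per-channel negate bits.
*/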
extern GLuint i915_get_temp(struct i915_fragment_program *p);
extern GLuint i915_get_utemp(struct i915_fragment_program *p);
extern void i915_release_utemps(struct i915_fragment_program *p);
extern GLuint i915_emit_texld(struct i915_fragment_program *p,
GLuint live_regs,
GLuint dest,
GLuint destmask,
GLuint sampler, GLuint coord, GLuint op);
extern GLuint i915_emit_arith(struct i915_fragment_program *p,
GLuint op,
GLuint dest,
GLuint mask,
GLuint saturate,
GLuint src0, GLuint src1, GLuint src2);
extern GLuint i915_emit_decl(struct i915_fragment_program *p,
GLuint type, GLuint nr, GLuint d0_flags);
extern GLuint i915_emit_const1f(struct i915_fragment_program *p, GLfloat c0);
extern GLuint i915_emit_const2f(struct i915_fragment_program *p,
GLfloat c0, GLfloat c1);
extern GLuint i915_emit_const4fv(struct i915_fragment_program *p,
const GLfloat * c);
extern GLuint i915_emit_const4f(struct i915_fragment_program *p,
GLfloat c0, GLfloat c1,
GLfloat c2, GLfloat c3);
extern GLuint i915_emit_param4fv(struct i915_fragment_program *p,
const GLfloat * values);
extern void i915_program_error(struct i915_fragment_program *p,
const char *fmt, ...);
extern void i915_init_program(struct i915_context *i915,
struct i915_fragment_program *p);
extern void i915_upload_program(struct i915_context *i915,
struct i915_fragment_program *p);
extern void i915_fini_program(struct i915_fragment_program *p);
extern void i915_update_program(struct gl_context *ctx);
#endif

View File

@ -1,730 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _I915_REG_H_
#define _I915_REG_H_
#include "intel_reg.h"
#define I915_SET_FIELD( var, mask, value ) (var &= ~(mask), var |= value)
#define PRIM3D_INLINE (CMD_3D | (0x1f<<24))
#define PRIM3D_TRILIST (0x0<<18)
#define PRIM3D_TRISTRIP (0x1<<18)
#define PRIM3D_TRISTRIP_RVRSE (0x2<<18)
#define PRIM3D_TRIFAN (0x3<<18)
#define PRIM3D_POLY (0x4<<18)
#define PRIM3D_LINELIST (0x5<<18)
#define PRIM3D_LINESTRIP (0x6<<18)
#define PRIM3D_RECTLIST (0x7<<18)
#define PRIM3D_POINTLIST (0x8<<18)
#define PRIM3D_DIB (0x9<<18)
#define PRIM3D_CLEAR_RECT (0xa<<18)
#define PRIM3D_ZONE_INIT (0xd<<18)
#define PRIM3D_MASK (0x1f<<18)
/* p137 */
#define _3DSTATE_AA_CMD (CMD_3D | (0x06<<24))
#define AA_LINE_ECAAR_WIDTH_ENABLE (1<<16)
#define AA_LINE_ECAAR_WIDTH_0_5 0
#define AA_LINE_ECAAR_WIDTH_1_0 (1<<14)
#define AA_LINE_ECAAR_WIDTH_2_0 (2<<14)
#define AA_LINE_ECAAR_WIDTH_4_0 (3<<14)
#define AA_LINE_REGION_WIDTH_ENABLE (1<<8)
#define AA_LINE_REGION_WIDTH_0_5 0
#define AA_LINE_REGION_WIDTH_1_0 (1<<6)
#define AA_LINE_REGION_WIDTH_2_0 (2<<6)
#define AA_LINE_REGION_WIDTH_4_0 (3<<6)
/* 3DSTATE_BACKFACE_STENCIL_OPS, p138*/
#define _3DSTATE_BACKFACE_STENCIL_OPS (CMD_3D | (0x8<<24))
#define BFO_ENABLE_STENCIL_REF (1<<23)
#define BFO_STENCIL_REF_SHIFT 15
#define BFO_STENCIL_REF_MASK (0xff<<15)
#define BFO_ENABLE_STENCIL_FUNCS (1<<14)
#define BFO_STENCIL_TEST_SHIFT 11
#define BFO_STENCIL_TEST_MASK (0x7<<11)
#define BFO_STENCIL_FAIL_SHIFT 8
#define BFO_STENCIL_FAIL_MASK (0x7<<8)
#define BFO_STENCIL_PASS_Z_FAIL_SHIFT 5
#define BFO_STENCIL_PASS_Z_FAIL_MASK (0x7<<5)
#define BFO_STENCIL_PASS_Z_PASS_SHIFT 2
#define BFO_STENCIL_PASS_Z_PASS_MASK (0x7<<2)
#define BFO_ENABLE_STENCIL_TWO_SIDE (1<<1)
#define BFO_STENCIL_TWO_SIDE (1<<0)
/* 3DSTATE_BACKFACE_STENCIL_MASKS, p140 */
#define _3DSTATE_BACKFACE_STENCIL_MASKS (CMD_3D | (0x9<<24))
#define BFM_ENABLE_STENCIL_TEST_MASK (1<<17)
#define BFM_ENABLE_STENCIL_WRITE_MASK (1<<16)
#define BFM_STENCIL_TEST_MASK_SHIFT 8
#define BFM_STENCIL_TEST_MASK_MASK (0xff<<8)
#define BFM_STENCIL_TEST_MASK(x) (((x)&0xff) << 8)
#define BFM_STENCIL_WRITE_MASK_SHIFT 0
#define BFM_STENCIL_WRITE_MASK_MASK (0xff<<0)
#define BFM_STENCIL_WRITE_MASK(x) ((x)&0xff)
/* 3DSTATE_BIN_CONTROL p141 */
/* 3DSTATE_CHROMA_KEY */
/* 3DSTATE_CLEAR_PARAMETERS, p150 */
/*
* Sets the color, depth and stencil clear values used by the
* CLEAR_RECT and ZONE_INIT primitive types, respectively. These
* primitive types override most 3D state and only take a minimal x/y
* vertex. The color/z/stencil information is supplied here and
* therefore cannot vary per vertex.
*/
#define _3DSTATE_CLEAR_PARAMETERS (CMD_3D | (0x1d<<24) | (0x9c<<16) | 5)
/* Dword 1 */
#define CLEARPARAM_CLEAR_RECT (1 << 16)
#define CLEARPARAM_ZONE_INIT (0 << 16)
#define CLEARPARAM_WRITE_COLOR (1 << 2)
#define CLEARPARAM_WRITE_DEPTH (1 << 1)
#define CLEARPARAM_WRITE_STENCIL (1 << 0)
/* 3DSTATE_CONSTANT_BLEND_COLOR, p153 */
#define _3DSTATE_CONST_BLEND_COLOR_CMD (CMD_3D | (0x1d<<24) | (0x88<<16))
/* 3DSTATE_COORD_SET_BINDINGS, p154 */
#define _3DSTATE_COORD_SET_BINDINGS (CMD_3D | (0x16<<24))
#define CSB_TCB(iunit, eunit) ((eunit)<<(iunit*3))
/* p156 */
#define _3DSTATE_DFLT_DIFFUSE_CMD (CMD_3D | (0x1d<<24) | (0x99<<16))
/* p157 */
#define _3DSTATE_DFLT_SPEC_CMD (CMD_3D | (0x1d<<24) | (0x9a<<16))
/* p158 */
#define _3DSTATE_DFLT_Z_CMD (CMD_3D | (0x1d<<24) | (0x98<<16))
/* 3DSTATE_DEPTH_OFFSET_SCALE, p159 */
#define _3DSTATE_DEPTH_OFFSET_SCALE (CMD_3D | (0x1d<<24) | (0x97<<16))
/* scale in dword 1 */
/* 3DSTATE_DEPTH_SUBRECT_DISABLE, p160 */
#define _3DSTATE_DEPTH_SUBRECT_DISABLE (CMD_3D | (0x1c<<24) | (0x11<<19) | 0x2)
/* p161 */
#define _3DSTATE_DST_BUF_VARS_CMD (CMD_3D | (0x1d<<24) | (0x85<<16))
/* Dword 1 */
#define CLASSIC_EARLY_DEPTH (1<<31)
#define TEX_DEFAULT_COLOR_OGL (0<<30)
#define TEX_DEFAULT_COLOR_D3D (1<<30)
#define ZR_EARLY_DEPTH (1<<29)
#define LOD_PRECLAMP_OGL (1<<28)
#define LOD_PRECLAMP_D3D (0<<28)
#define DITHER_FULL_ALWAYS (0<<26)
#define DITHER_FULL_ON_FB_BLEND (1<<26)
#define DITHER_CLAMPED_ALWAYS (2<<26)
#define LINEAR_GAMMA_BLEND_32BPP (1<<25)
#define DEBUG_DISABLE_ENH_DITHER (1<<24)
#define DSTORG_HORT_BIAS(x) ((x)<<20)
#define DSTORG_VERT_BIAS(x) ((x)<<16)
#define COLOR_4_2_2_CHNL_WRT_ALL 0
#define COLOR_4_2_2_CHNL_WRT_Y (1<<12)
#define COLOR_4_2_2_CHNL_WRT_CR (2<<12)
#define COLOR_4_2_2_CHNL_WRT_CB (3<<12)
#define COLOR_4_2_2_CHNL_WRT_CRCB (4<<12)
#define COLR_BUF_8BIT 0
#define COLR_BUF_RGB555 (1<<8)
#define COLR_BUF_RGB565 (2<<8)
#define COLR_BUF_ARGB8888 (3<<8)
#define DEPTH_FRMT_16_FIXED 0
#define DEPTH_FRMT_16_FLOAT (1<<2)
#define DEPTH_FRMT_24_FIXED_8_OTHER (2<<2)
#define VERT_LINE_STRIDE_1 (1<<1)
#define VERT_LINE_STRIDE_0 (0<<1)
#define VERT_LINE_STRIDE_OFS_1 1
#define VERT_LINE_STRIDE_OFS_0 0
/* p166 */
#define _3DSTATE_DRAW_RECT_CMD (CMD_3D|(0x1d<<24)|(0x80<<16)|3)
/* Dword 1 */
#define DRAW_RECT_DIS_DEPTH_OFS (1<<30)
#define DRAW_DITHER_OFS_X(x) ((x)<<26)
#define DRAW_DITHER_OFS_Y(x) ((x)<<24)
/* Dword 2 */
#define DRAW_YMIN(x) ((x)<<16)
#define DRAW_XMIN(x) (x)
/* Dword 3 */
#define DRAW_YMAX(x) ((x)<<16)
#define DRAW_XMAX(x) (x)
/* Dword 4 */
#define DRAW_YORG(x) ((x)<<16)
#define DRAW_XORG(x) (x)
/* 3DSTATE_FILTER_COEFFICIENTS_4X4, p170 */
/* 3DSTATE_FILTER_COEFFICIENTS_6X5, p172 */
/* _3DSTATE_FOG_COLOR, p173 */
#define _3DSTATE_FOG_COLOR_CMD (CMD_3D|(0x15<<24))
#define FOG_COLOR_RED(x) ((x)<<16)
#define FOG_COLOR_GREEN(x) ((x)<<8)
#define FOG_COLOR_BLUE(x) (x)
/* _3DSTATE_FOG_MODE, p174 */
#define _3DSTATE_FOG_MODE_CMD (CMD_3D|(0x1d<<24)|(0x89<<16)|2)
/* Dword 1 */
#define FMC1_FOGFUNC_MODIFY_ENABLE (1<<31)
#define FMC1_FOGFUNC_VERTEX (0<<28)
#define FMC1_FOGFUNC_PIXEL_EXP (1<<28)
#define FMC1_FOGFUNC_PIXEL_EXP2 (2<<28)
#define FMC1_FOGFUNC_PIXEL_LINEAR (3<<28)
#define FMC1_FOGFUNC_MASK (3<<28)
#define FMC1_FOGINDEX_MODIFY_ENABLE (1<<27)
#define FMC1_FOGINDEX_Z (0<<25)
#define FMC1_FOGINDEX_W (1<<25)
#define FMC1_C1_C2_MODIFY_ENABLE (1<<24)
#define FMC1_DENSITY_MODIFY_ENABLE (1<<23)
#define FMC1_C1_ONE (1<<13)
#define FMC1_C1_MASK (0xffff<<4)
/* Dword 2 */
#define FMC2_C2_ONE (1<<16)
/* Dword 3 */
#define FMC3_D_ONE (1<<16)
/* _3DSTATE_INDEPENDENT_ALPHA_BLEND, p177 */
#define _3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD (CMD_3D|(0x0b<<24))
#define IAB_MODIFY_ENABLE (1<<23)
#define IAB_ENABLE (1<<22)
#define IAB_MODIFY_FUNC (1<<21)
#define IAB_FUNC_SHIFT 16
#define IAB_MODIFY_SRC_FACTOR (1<<11)
#define IAB_SRC_FACTOR_SHIFT 6
#define IAB_SRC_FACTOR_MASK (BLENDFACT_MASK<<6)
#define IAB_MODIFY_DST_FACTOR (1<<5)
#define IAB_DST_FACTOR_SHIFT 0
#define IAB_DST_FACTOR_MASK (BLENDFACT_MASK<<0)
#define BLENDFUNC_ADD 0x0
#define BLENDFUNC_SUBTRACT 0x1
#define BLENDFUNC_REVERSE_SUBTRACT 0x2
#define BLENDFUNC_MIN 0x3
#define BLENDFUNC_MAX 0x4
#define BLENDFUNC_MASK 0x7
/* 3DSTATE_LOAD_INDIRECT, p180 */
#define _3DSTATE_LOAD_INDIRECT (CMD_3D|(0x1d<<24)|(0x7<<16))
#define LI0_STATE_STATIC_INDIRECT (0x01<<8)
#define LI0_STATE_DYNAMIC_INDIRECT (0x02<<8)
#define LI0_STATE_SAMPLER (0x04<<8)
#define LI0_STATE_MAP (0x08<<8)
#define LI0_STATE_PROGRAM (0x10<<8)
#define LI0_STATE_CONSTANTS (0x20<<8)
#define SIS0_BUFFER_ADDRESS(x) ((x)&~0x3)
#define SIS0_FORCE_LOAD (1<<1)
#define SIS0_BUFFER_VALID (1<<0)
#define SIS1_BUFFER_LENGTH(x) ((x)&0xff)
#define DIS0_BUFFER_ADDRESS(x) ((x)&~0x3)
#define DIS0_BUFFER_RESET (1<<1)
#define DIS0_BUFFER_VALID (1<<0)
#define SSB0_BUFFER_ADDRESS(x) ((x)&~0x3)
#define SSB0_FORCE_LOAD (1<<1)
#define SSB0_BUFFER_VALID (1<<0)
#define SSB1_BUFFER_LENGTH(x) ((x)&0xff)
#define MSB0_BUFFER_ADDRESS(x) ((x)&~0x3)
#define MSB0_FORCE_LOAD (1<<1)
#define MSB0_BUFFER_VALID (1<<0)
#define MSB1_BUFFER_LENGTH(x) ((x)&0xff)
#define PSP0_BUFFER_ADDRESS(x) ((x)&~0x3)
#define PSP0_FORCE_LOAD (1<<1)
#define PSP0_BUFFER_VALID (1<<0)
#define PSP1_BUFFER_LENGTH(x) ((x)&0xff)
#define PSC0_BUFFER_ADDRESS(x) ((x)&~0x3)
#define PSC0_FORCE_LOAD (1<<1)
#define PSC0_BUFFER_VALID (1<<0)
#define PSC1_BUFFER_LENGTH(x) ((x)&0xff)
/* _3DSTATE_RASTERIZATION_RULES */
#define _3DSTATE_RASTER_RULES_CMD (CMD_3D|(0x07<<24))
#define ENABLE_POINT_RASTER_RULE (1<<15)
#define OGL_POINT_RASTER_RULE (1<<13)
#define ENABLE_TEXKILL_3D_4D (1<<10)
#define TEXKILL_3D (0<<9)
#define TEXKILL_4D (1<<9)
#define ENABLE_LINE_STRIP_PROVOKE_VRTX (1<<8)
#define ENABLE_TRI_FAN_PROVOKE_VRTX (1<<5)
#define LINE_STRIP_PROVOKE_VRTX_MASK (3 << 6)
#define LINE_STRIP_PROVOKE_VRTX(x) ((x)<<6)
#define TRI_FAN_PROVOKE_VRTX_MASK (3 << 3)
#define TRI_FAN_PROVOKE_VRTX(x) ((x)<<3)
/* _3DSTATE_SCISSOR_ENABLE, p256 */
#define _3DSTATE_SCISSOR_ENABLE_CMD (CMD_3D|(0x1c<<24)|(0x10<<19))
#define ENABLE_SCISSOR_RECT ((1<<1) | 1)
#define DISABLE_SCISSOR_RECT (1<<1)
/* _3DSTATE_SCISSOR_RECTANGLE_0, p257 */
#define _3DSTATE_SCISSOR_RECT_0_CMD (CMD_3D|(0x1d<<24)|(0x81<<16)|1)
/* Dword 1 */
#define SCISSOR_RECT_0_YMIN(x) ((x)<<16)
#define SCISSOR_RECT_0_XMIN(x) (x)
/* Dword 2 */
#define SCISSOR_RECT_0_YMAX(x) ((x)<<16)
#define SCISSOR_RECT_0_XMAX(x) (x)
/* Helper macros for blend factors
*/
#define DST_BLND_FACT(f) ((f)<<S6_CBUF_DST_BLEND_FACT_SHIFT)
#define SRC_BLND_FACT(f) ((f)<<S6_CBUF_SRC_BLEND_FACT_SHIFT)
#define DST_ABLND_FACT(f) ((f)<<IAB_DST_FACTOR_SHIFT)
#define SRC_ABLND_FACT(f) ((f)<<IAB_SRC_FACTOR_SHIFT)
/* 3DSTATE_MAP_DEINTERLACER_PARAMETERS */
/* 3DSTATE_MAP_PALETTE_LOAD_32, p206 */
#define _3DSTATE_MAP_PALETTE_LOAD_32 (CMD_3D|(0x1d<<24)|(0x8f<<16))
/* subsequent dwords up to length (max 16) are ARGB8888 color values */
/* _3DSTATE_MODES_4, p218 */
#define _3DSTATE_MODES_4_CMD (CMD_3D|(0x0d<<24))
#define ENABLE_LOGIC_OP_FUNC (1<<23)
#define LOGIC_OP_FUNC(x) ((x)<<18)
#define LOGICOP_MASK (0xf<<18)
#define MODE4_ENABLE_STENCIL_TEST_MASK ((1<<17)|(0xff00))
#define ENABLE_STENCIL_TEST_MASK (1<<17)
#define STENCIL_TEST_MASK(x) (((x)&0xff)<<8)
#define MODE4_ENABLE_STENCIL_WRITE_MASK ((1<<16)|(0x00ff))
#define ENABLE_STENCIL_WRITE_MASK (1<<16)
#define STENCIL_WRITE_MASK(x) ((x)&0xff)
/* _3DSTATE_MODES_5, p220 */
#define _3DSTATE_MODES_5_CMD (CMD_3D|(0x0c<<24))
#define PIPELINE_FLUSH_RENDER_CACHE (1<<18)
#define PIPELINE_FLUSH_TEXTURE_CACHE (1<<16)
/* p221 */
#define _3DSTATE_PIXEL_SHADER_CONSTANTS (CMD_3D|(0x1d<<24)|(0x6<<16))
#define PS1_REG(n) (1<<(n))
#define PS2_CONST_X(n) (n)
#define PS3_CONST_Y(n) (n)
#define PS4_CONST_Z(n) (n)
#define PS5_CONST_W(n) (n)
/* p222 */
/* Each instruction is 3 dwords long, though most don't require all
* this space. Maximum of 123 instructions. Smaller maxes per insn
* type.
*/
#define _3DSTATE_PIXEL_SHADER_PROGRAM (CMD_3D|(0x1d<<24)|(0x5<<16))
#define REG_TYPE_R 0 /* temporary regs, no need to
* dcl, must be written before
* read -- Preserved between
* phases.
*/
#define REG_TYPE_T 1 /* Interpolated values, must be
* dcl'ed before use.
*
* 0..7: texture coord,
* 8: diffuse color,
* 9: specular color,
* 10: fog parameter in w.
*/
#define REG_TYPE_CONST 2 /* Restriction: only one const
* can be referenced per
* instruction, though it may be
* selected for multiple inputs.
* Constants not initialized
* default to zero.
*/
#define REG_TYPE_S 3 /* sampler */
#define REG_TYPE_OC 4 /* output color (rgba) */
#define REG_TYPE_OD 5 /* output depth (w), xyz are
* temporaries. If not written,
* interpolated depth is used?
*/
#define REG_TYPE_U 6 /* unpreserved temporaries */
#define REG_TYPE_MASK 0x7
#define REG_NR_MASK 0xf
/* REG_TYPE_T:
*/
#define T_TEX0 0
#define T_TEX1 1
#define T_TEX2 2
#define T_TEX3 3
#define T_TEX4 4
#define T_TEX5 5
#define T_TEX6 6
#define T_TEX7 7
#define T_DIFFUSE 8
#define T_SPECULAR 9
#define T_FOG_W 10 /* interpolated fog is in W coord */
/* Arithmetic instructions */
/* .replicate_swizzle == selection and replication of a particular
* scalar channel, i.e., .xxxx, .yyyy, .zzzz or .wwww
*/
#define A0_NOP (0x0<<24) /* no operation */
#define A0_ADD (0x1<<24) /* dst = src0 + src1 */
#define A0_MOV (0x2<<24) /* dst = src0 */
#define A0_MUL (0x3<<24) /* dst = src0 * src1 */
#define A0_MAD (0x4<<24) /* dst = src0 * src1 + src2 */
#define A0_DP2ADD (0x5<<24) /* dst.xyzw = src0.xy dot src1.xy + src2.replicate_swizzle */
#define A0_DP3 (0x6<<24) /* dst.xyzw = src0.xyz dot src1.xyz */
#define A0_DP4 (0x7<<24) /* dst.xyzw = src0.xyzw dot src1.xyzw */
#define A0_FRC (0x8<<24) /* dst = src0 - floor(src0) */
#define A0_RCP (0x9<<24) /* dst.xyzw = 1/(src0.replicate_swizzle) */
#define A0_RSQ (0xa<<24) /* dst.xyzw = 1/(sqrt(abs(src0.replicate_swizzle))) */
#define A0_EXP (0xb<<24) /* dst.xyzw = exp2(src0.replicate_swizzle) */
#define A0_LOG (0xc<<24) /* dst.xyzw = log2(abs(src0.replicate_swizzle)) */
#define A0_CMP (0xd<<24) /* dst = (src0 >= 0.0) ? src1 : src2 */
#define A0_MIN (0xe<<24) /* dst = (src0 < src1) ? src0 : src1 */
#define A0_MAX (0xf<<24) /* dst = (src0 >= src1) ? src0 : src1 */
#define A0_FLR (0x10<<24) /* dst = floor(src0) */
#define A0_MOD (0x11<<24) /* dst = src0 fmod 1.0 */
#define A0_TRC (0x12<<24) /* dst = int(src0) */
#define A0_SGE (0x13<<24) /* dst = src0 >= src1 ? 1.0 : 0.0 */
#define A0_SLT (0x14<<24) /* dst = src0 < src1 ? 1.0 : 0.0 */
#define A0_DEST_SATURATE (1<<22)
#define A0_DEST_TYPE_SHIFT 19
/* Allow: R, OC, OD, U */
#define A0_DEST_NR_SHIFT 14
/* Allow R: 0..15, OC,OD: 0..0, U: 0..2 */
#define A0_DEST_CHANNEL_X (1<<10)
#define A0_DEST_CHANNEL_Y (2<<10)
#define A0_DEST_CHANNEL_Z (4<<10)
#define A0_DEST_CHANNEL_W (8<<10)
#define A0_DEST_CHANNEL_ALL (0xf<<10)
#define A0_DEST_CHANNEL_SHIFT 10
#define A0_SRC0_TYPE_SHIFT 7
#define A0_SRC0_NR_SHIFT 2
#define A0_DEST_CHANNEL_XY (A0_DEST_CHANNEL_X|A0_DEST_CHANNEL_Y)
#define A0_DEST_CHANNEL_XYZ (A0_DEST_CHANNEL_XY|A0_DEST_CHANNEL_Z)
#define SRC_X 0
#define SRC_Y 1
#define SRC_Z 2
#define SRC_W 3
#define SRC_ZERO 4
#define SRC_ONE 5
#define A1_SRC0_CHANNEL_X_NEGATE (1<<31)
#define A1_SRC0_CHANNEL_X_SHIFT 28
#define A1_SRC0_CHANNEL_Y_NEGATE (1<<27)
#define A1_SRC0_CHANNEL_Y_SHIFT 24
#define A1_SRC0_CHANNEL_Z_NEGATE (1<<23)
#define A1_SRC0_CHANNEL_Z_SHIFT 20
#define A1_SRC0_CHANNEL_W_NEGATE (1<<19)
#define A1_SRC0_CHANNEL_W_SHIFT 16
#define A1_SRC1_TYPE_SHIFT 13
#define A1_SRC1_NR_SHIFT 8
#define A1_SRC1_CHANNEL_X_NEGATE (1<<7)
#define A1_SRC1_CHANNEL_X_SHIFT 4
#define A1_SRC1_CHANNEL_Y_NEGATE (1<<3)
#define A1_SRC1_CHANNEL_Y_SHIFT 0
#define A2_SRC1_CHANNEL_Z_NEGATE (1<<31)
#define A2_SRC1_CHANNEL_Z_SHIFT 28
#define A2_SRC1_CHANNEL_W_NEGATE (1<<27)
#define A2_SRC1_CHANNEL_W_SHIFT 24
#define A2_SRC2_TYPE_SHIFT 21
#define A2_SRC2_NR_SHIFT 16
#define A2_SRC2_CHANNEL_X_NEGATE (1<<15)
#define A2_SRC2_CHANNEL_X_SHIFT 12
#define A2_SRC2_CHANNEL_Y_NEGATE (1<<11)
#define A2_SRC2_CHANNEL_Y_SHIFT 8
#define A2_SRC2_CHANNEL_Z_NEGATE (1<<7)
#define A2_SRC2_CHANNEL_Z_SHIFT 4
#define A2_SRC2_CHANNEL_W_NEGATE (1<<3)
#define A2_SRC2_CHANNEL_W_SHIFT 0
/* Texture instructions */
#define T0_TEXLD (0x15<<24) /* Sample texture using predeclared
* sampler and address, and output
* filtered texel data to destination
* register */
#define T0_TEXLDP (0x16<<24) /* Same as texld but performs a
* perspective divide of the texture
* coordinate .xyz values by .w before
* sampling. */
#define T0_TEXLDB (0x17<<24) /* Same as texld but biases the
* computed LOD by w. Only S4.6 two's
* comp is used. This implies that a
* float to fixed conversion is
* done. */
#define T0_TEXKILL (0x18<<24) /* Does not perform a sampling
* operation. Simply kills the pixel
* if any channel of the address
* register is < 0.0. */
#define T0_DEST_TYPE_SHIFT 19
/* Allow: R, OC, OD, U */
/* Note: U (unpreserved) regs do not retain their values between
* phases (cannot be used for feedback)
*
* Note: oC and oD registers can only be used as the destination of a
* texture instruction once per phase (this is an implementation
* restriction).
*/
#define T0_DEST_NR_SHIFT 14
/* Allow R: 0..15, OC,OD: 0..0, U: 0..2 */
#define T0_SAMPLER_NR_SHIFT 0 /* This field ignored for TEXKILL */
#define T0_SAMPLER_NR_MASK (0xf<<0)
#define T1_ADDRESS_REG_TYPE_SHIFT 24 /* Reg to use as texture coord */
/* Allow R, T, OC, OD -- R, OC, OD are 'dependent' reads, new program phase */
#define T1_ADDRESS_REG_NR_SHIFT 17
#define T2_MBZ 0
/* Declaration instructions */
#define D0_DCL (0x19<<24) /* Declare a t (interpolated attrib)
* register or an s (sampler)
* register. */
#define D0_SAMPLE_TYPE_SHIFT 22
#define D0_SAMPLE_TYPE_2D (0x0<<22)
#define D0_SAMPLE_TYPE_CUBE (0x1<<22)
#define D0_SAMPLE_TYPE_VOLUME (0x2<<22)
#define D0_SAMPLE_TYPE_MASK (0x3<<22)
#define D0_TYPE_SHIFT 19
/* Allow: T, S */
#define D0_NR_SHIFT 14
/* Allow T: 0..10, S: 0..15 */
#define D0_CHANNEL_X (1<<10)
#define D0_CHANNEL_Y (2<<10)
#define D0_CHANNEL_Z (4<<10)
#define D0_CHANNEL_W (8<<10)
#define D0_CHANNEL_ALL (0xf<<10)
#define D0_CHANNEL_NONE (0<<10)
#define D0_CHANNEL_XY (D0_CHANNEL_X|D0_CHANNEL_Y)
#define D0_CHANNEL_XYZ (D0_CHANNEL_XY|D0_CHANNEL_Z)
/* I915 Errata: Do not allow (xz), (xw), (xzw) combinations for diffuse
* or specular declarations.
*
* For T dcls, only allow: (x), (xy), (xyz), (w), (xyzw)
*
* Must be zero for S (sampler) dcls
*/
#define D1_MBZ 0
#define D2_MBZ 0
/* p207 */
#define _3DSTATE_MAP_STATE (CMD_3D|(0x1d<<24)|(0x0<<16))
#define MS1_MAPMASK_SHIFT 0
#define MS1_MAPMASK_MASK (0x8fff<<0)
#define MS2_UNTRUSTED_SURFACE (1<<31)
#define MS2_ADDRESS_MASK 0xfffffffc
#define MS2_VERTICAL_LINE_STRIDE (1<<1)
#define MS2_VERTICAL_OFFSET (1<<1)
#define MS3_HEIGHT_SHIFT 21
#define MS3_WIDTH_SHIFT 10
#define MS3_PALETTE_SELECT (1<<9)
#define MS3_MAPSURF_FORMAT_SHIFT 7
#define MS3_MAPSURF_FORMAT_MASK (0x7<<7)
#define MAPSURF_8BIT (1<<7)
#define MAPSURF_16BIT (2<<7)
#define MAPSURF_32BIT (3<<7)
#define MAPSURF_422 (5<<7)
#define MAPSURF_COMPRESSED (6<<7)
#define MAPSURF_4BIT_INDEXED (7<<7)
#define MS3_MT_FORMAT_MASK (0x7 << 3)
#define MS3_MT_FORMAT_SHIFT 3
#define MT_4BIT_IDX_ARGB8888 (7<<3) /* SURFACE_4BIT_INDEXED */
#define MT_8BIT_I8 (0<<3) /* SURFACE_8BIT */
#define MT_8BIT_L8 (1<<3)
#define MT_8BIT_A8 (4<<3)
#define MT_8BIT_MONO8 (5<<3)
#define MT_16BIT_RGB565 (0<<3) /* SURFACE_16BIT */
#define MT_16BIT_ARGB1555 (1<<3)
#define MT_16BIT_ARGB4444 (2<<3)
#define MT_16BIT_AY88 (3<<3)
#define MT_16BIT_88DVDU (5<<3)
#define MT_16BIT_BUMP_655LDVDU (6<<3)
#define MT_16BIT_I16 (7<<3)
#define MT_16BIT_L16 (8<<3)
#define MT_16BIT_A16 (9<<3)
#define MT_32BIT_ARGB8888 (0<<3) /* SURFACE_32BIT */
#define MT_32BIT_ABGR8888 (1<<3)
#define MT_32BIT_XRGB8888 (2<<3)
#define MT_32BIT_XBGR8888 (3<<3)
#define MT_32BIT_QWVU8888 (4<<3)
#define MT_32BIT_AXVU8888 (5<<3)
#define MT_32BIT_LXVU8888 (6<<3)
#define MT_32BIT_XLVU8888 (7<<3)
#define MT_32BIT_ARGB2101010 (8<<3)
#define MT_32BIT_ABGR2101010 (9<<3)
#define MT_32BIT_AWVU2101010 (0xA<<3)
#define MT_32BIT_GR1616 (0xB<<3)
#define MT_32BIT_VU1616 (0xC<<3)
#define MT_32BIT_x8I24 (0xD<<3)
#define MT_32BIT_x8L24 (0xE<<3)
#define MT_32BIT_x8A24 (0xF<<3)
#define MT_422_YCRCB_SWAPY (0<<3) /* SURFACE_422 */
#define MT_422_YCRCB_NORMAL (1<<3)
#define MT_422_YCRCB_SWAPUV (2<<3)
#define MT_422_YCRCB_SWAPUVY (3<<3)
#define MT_COMPRESS_DXT1 (0<<3) /* SURFACE_COMPRESSED */
#define MT_COMPRESS_DXT2_3 (1<<3)
#define MT_COMPRESS_DXT4_5 (2<<3)
#define MT_COMPRESS_FXT1 (3<<3)
#define MT_COMPRESS_DXT1_RGB (4<<3)
#define MS3_USE_FENCE_REGS (1<<2)
#define MS3_TILED_SURFACE (1<<1)
#define MS3_TILE_WALK (1<<0)
#define MS4_PITCH_SHIFT 21
#define MS4_CUBE_FACE_ENA_NEGX (1<<20)
#define MS4_CUBE_FACE_ENA_POSX (1<<19)
#define MS4_CUBE_FACE_ENA_NEGY (1<<18)
#define MS4_CUBE_FACE_ENA_POSY (1<<17)
#define MS4_CUBE_FACE_ENA_NEGZ (1<<16)
#define MS4_CUBE_FACE_ENA_POSZ (1<<15)
#define MS4_CUBE_FACE_ENA_MASK (0x3f<<15)
#define MS4_MAX_LOD_SHIFT 9
#define MS4_MAX_LOD_MASK (0x3f<<9)
#define MS4_MIP_LAYOUT_LEGACY (0<<8)
#define MS4_MIP_LAYOUT_BELOW_LPT (0<<8)
#define MS4_MIP_LAYOUT_RIGHT_LPT (1<<8)
#define MS4_VOLUME_DEPTH_SHIFT 0
#define MS4_VOLUME_DEPTH_MASK (0xff<<0)
/* p244 */
#define _3DSTATE_SAMPLER_STATE (CMD_3D|(0x1d<<24)|(0x1<<16))
#define SS1_MAPMASK_SHIFT 0
#define SS1_MAPMASK_MASK (0x8fff<<0)
#define SS2_REVERSE_GAMMA_ENABLE (1<<31)
#define SS2_PACKED_TO_PLANAR_ENABLE (1<<30)
#define SS2_COLORSPACE_CONVERSION (1<<29)
#define SS2_CHROMAKEY_SHIFT 27
#define SS2_BASE_MIP_LEVEL_SHIFT 22
#define SS2_BASE_MIP_LEVEL_MASK (0x1f<<22)
#define SS2_MIP_FILTER_SHIFT 20
#define SS2_MIP_FILTER_MASK (0x3<<20)
#define MIPFILTER_NONE 0
#define MIPFILTER_NEAREST 1
#define MIPFILTER_LINEAR 3
#define SS2_MAG_FILTER_SHIFT 17
#define SS2_MAG_FILTER_MASK (0x7<<17)
#define FILTER_NEAREST 0
#define FILTER_LINEAR 1
#define FILTER_ANISOTROPIC 2
#define FILTER_4X4_1 3
#define FILTER_4X4_2 4
#define FILTER_4X4_FLAT 5
#define FILTER_6X5_MONO 6 /* XXX - check */
#define SS2_MIN_FILTER_SHIFT 14
#define SS2_MIN_FILTER_MASK (0x7<<14)
#define SS2_LOD_BIAS_SHIFT 5
#define SS2_LOD_BIAS_ONE (0x10<<5)
#define SS2_LOD_BIAS_MASK (0x1ff<<5)
/* Shadow requires:
* MT_X8{I,L,A}24 or MT_{I,L,A}16 texture format
* FILTER_4X4_x MIN and MAG filters
*/
#define SS2_SHADOW_ENABLE (1<<4)
#define SS2_MAX_ANISO_MASK (1<<3)
#define SS2_MAX_ANISO_2 (0<<3)
#define SS2_MAX_ANISO_4 (1<<3)
#define SS2_SHADOW_FUNC_SHIFT 0
#define SS2_SHADOW_FUNC_MASK (0x7<<0)
/* SS2_SHADOW_FUNC values: see COMPAREFUNC_* */
#define SS3_MIN_LOD_SHIFT 24
#define SS3_MIN_LOD_ONE (0x10<<24)
#define SS3_MIN_LOD_MASK (0xff<<24)
#define SS3_KILL_PIXEL_ENABLE (1<<17)
#define SS3_TCX_ADDR_MODE_SHIFT 12
#define SS3_TCX_ADDR_MODE_MASK (0x7<<12)
#define TEXCOORDMODE_WRAP 0
#define TEXCOORDMODE_MIRROR 1
#define TEXCOORDMODE_CLAMP_EDGE 2
#define TEXCOORDMODE_CUBE 3
#define TEXCOORDMODE_CLAMP_BORDER 4
#define TEXCOORDMODE_MIRROR_ONCE 5
#define SS3_TCY_ADDR_MODE_SHIFT 9
#define SS3_TCY_ADDR_MODE_MASK (0x7<<9)
#define SS3_TCZ_ADDR_MODE_SHIFT 6
#define SS3_TCZ_ADDR_MODE_MASK (0x7<<6)
#define SS3_NORMALIZED_COORDS (1<<5)
#define SS3_TEXTUREMAP_INDEX_SHIFT 1
#define SS3_TEXTUREMAP_INDEX_MASK (0xf<<1)
#define SS3_DEINTERLACER_ENABLE (1<<0)
#define SS4_BORDER_COLOR_MASK (~0)
/* 3DSTATE_SPAN_STIPPLE, p258
*/
#define _3DSTATE_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define ST1_ENABLE (1<<16)
#define ST1_MASK (0xffff)
#define _3DSTATE_DEFAULT_Z ((0x3<<29)|(0x1d<<24)|(0x98<<16))
#define _3DSTATE_DEFAULT_DIFFUSE ((0x3<<29)|(0x1d<<24)|(0x99<<16))
#define _3DSTATE_DEFAULT_SPECULAR ((0x3<<29)|(0x1d<<24)|(0x9a<<16))
#endif

File diff suppressed because it is too large

View File

@ -1,481 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/** @file i915_tex_layout.c
* Code to lay out images in a mipmap tree for i830M-GM915, and for G945 and beyond.
*/
#include "intel_mipmap_tree.h"
#include "intel_tex_layout.h"
#include "main/macros.h"
#include "intel_context.h"
#define FILE_DEBUG_FLAG DEBUG_TEXTURE
static GLint initial_offsets[6][2] = {
[FACE_POS_X] = {0, 0},
[FACE_POS_Y] = {1, 0},
[FACE_POS_Z] = {1, 1},
[FACE_NEG_X] = {0, 2},
[FACE_NEG_Y] = {1, 2},
[FACE_NEG_Z] = {1, 3},
};
static GLint step_offsets[6][2] = {
[FACE_POS_X] = {0, 2},
[FACE_POS_Y] = {-1, 2},
[FACE_POS_Z] = {-1, 1},
[FACE_NEG_X] = {0, 2},
[FACE_NEG_Y] = {-1, 2},
[FACE_NEG_Z] = {-1, 1},
};
static GLint bottom_offsets[6] = {
[FACE_POS_X] = 16 + 0 * 8,
[FACE_POS_Y] = 16 + 1 * 8,
[FACE_POS_Z] = 16 + 2 * 8,
[FACE_NEG_X] = 16 + 3 * 8,
[FACE_NEG_Y] = 16 + 4 * 8,
[FACE_NEG_Z] = 16 + 5 * 8,
};
/**
* Cube texture map layout for i830M-GM915 and
* non-compressed cube texture maps on GM945.
*
* Hardware layout looks like:
*
* +-------+-------+
* | | |
* | | |
* | | |
* | +x | +y |
* | | |
* | | |
* | | |
* | | |
* +---+---+-------+
* | | | |
* | +x| +y| |
* | | | |
* | | | |
* +-+-+---+ +z |
* | | | | |
* +-+-+ +z| |
* | | | |
* +-+-+---+-------+
* | | |
* | | |
* | | |
* | -x | -y |
* | | |
* | | |
* | | |
* | | |
* +---+---+-------+
* | | | |
* | -x| -y| |
* | | | |
* | | | |
* +-+-+---+ -z |
* | | | | |
* +-+-+ -z| |
* | | | |
* +-+---+-------+
*
*/
static void
i915_miptree_layout_cube(struct intel_mipmap_tree * mt)
{
const GLuint dim = mt->physical_width0;
GLuint face;
GLuint lvlWidth = mt->physical_width0, lvlHeight = mt->physical_height0;
GLint level;
assert(lvlWidth == lvlHeight); /* cubemap images are square */
/* double pitch for cube layouts */
mt->total_width = dim * 2;
mt->total_height = dim * 4;
for (level = mt->first_level; level <= mt->last_level; level++) {
intel_miptree_set_level_info(mt, level,
0, 0,
lvlWidth, lvlHeight,
6);
lvlWidth /= 2;
lvlHeight /= 2;
}
for (face = 0; face < 6; face++) {
GLuint x = initial_offsets[face][0] * dim;
GLuint y = initial_offsets[face][1] * dim;
GLuint d = dim;
for (level = mt->first_level; level <= mt->last_level; level++) {
intel_miptree_set_image_offset(mt, level, face, x, y);
if (d == 0)
printf("cube mipmap %d/%d (%d..%d) is 0x0\n",
face, level, mt->first_level, mt->last_level);
d >>= 1;
x += step_offsets[face][0] * d;
y += step_offsets[face][1] * d;
}
}
}
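/* Worked example (illustrative): for a 64x64 cube map this gives a
* 128x256 layout; face +y starts at (64, 0) and its 32x32 mip lands at
* (64 - 32, 0 + 2*32) = (32, 64) per step_offsets.
*/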
static void
i915_miptree_layout_3d(struct intel_mipmap_tree * mt)
{
GLuint width = mt->physical_width0;
GLuint height = mt->physical_height0;
GLuint depth = mt->physical_depth0;
GLuint stack_height = 0;
GLint level;
/* Calculate the size of a single slice. */
mt->total_width = mt->physical_width0;
/* XXX: hardware expects/requires 9 levels at minimum. */
for (level = mt->first_level; level <= MAX2(8, mt->last_level); level++) {
intel_miptree_set_level_info(mt, level, 0, mt->total_height,
width, height, depth);
stack_height += MAX2(2, height);
width = minify(width, 1);
height = minify(height, 1);
depth = minify(depth, 1);
}
/* Fixup depth image_offsets: */
depth = mt->physical_depth0;
for (level = mt->first_level; level <= mt->last_level; level++) {
GLuint i;
for (i = 0; i < depth; i++) {
intel_miptree_set_image_offset(mt, level, i,
0, i * stack_height);
}
depth = minify(depth, 1);
}
/* Multiply slice size by texture depth for total size. It's
* remarkable how wasteful of memory the i915 texture layouts
* are. They are largely fixed in the i945.
*/
mt->total_height = stack_height * mt->physical_depth0;
}
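/* Worked example (illustrative): an 8x8x8 texture with full mips packs
* MAX2(2, height) rows for 9 levels (8 + 4 + 2*7 = 26) per slice, so
* total_height = 26 * 8 = 208 -- hence the comment above about waste.
*/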
static void
i915_miptree_layout_2d(struct intel_mipmap_tree * mt)
{
GLuint width = mt->physical_width0;
GLuint height = mt->physical_height0;
GLuint img_height;
GLint level;
mt->total_width = mt->physical_width0;
mt->total_height = 0;
for (level = mt->first_level; level <= mt->last_level; level++) {
intel_miptree_set_level_info(mt, level,
0, mt->total_height,
width, height, 1);
if (mt->compressed)
img_height = ALIGN(height, 4) / 4;
else
img_height = ALIGN(height, 2);
mt->total_height += img_height;
width = minify(width, 1);
height = minify(height, 1);
}
}
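/* Worked example: an uncompressed 8x4 texture with full mips stacks
* level heights ALIGN(h, 2) = 4, 2, 2, 2, giving total_width = 8 and
* total_height = 10.
*/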
void
i915_miptree_layout(struct intel_mipmap_tree * mt)
{
switch (mt->target) {
case GL_TEXTURE_CUBE_MAP:
i915_miptree_layout_cube(mt);
break;
case GL_TEXTURE_3D:
i915_miptree_layout_3d(mt);
break;
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
case GL_TEXTURE_RECTANGLE_ARB:
i915_miptree_layout_2d(mt);
break;
default:
_mesa_problem(NULL, "Unexpected tex target in i915_miptree_layout()");
break;
}
DBG("%s: %dx%dx%d\n", __func__,
mt->total_width, mt->total_height, mt->cpp);
}
/**
* Compressed cube texture map layout for GM945 and later.
*
* The hardware layout looks like the 830-915 layout, except for the small
* sizes. A zoomed-in view of the layout for 945 is:
*
* +-------+-------+
* | 8x8 | 8x8 |
* | | |
* | | |
* | +x | +y |
* | | |
* | | |
* | | |
* | | |
* +---+---+-------+
* |4x4| | 8x8 |
* | +x| | |
* | | | |
* | | | |
* +---+ | +z |
* |4x4| | |
* | +y| | |
* | | | |
* +---+ +-------+
*
* ...
*
* +-------+-------+
* | 8x8 | 8x8 |
* | | |
* | | |
* | -x | -y |
* | | |
* | | |
* | | |
* | | |
* +---+---+-------+
* |4x4| | 8x8 |
* | -x| | |
* | | | |
* | | | |
* +---+ | -z |
* |4x4| | |
* | -y| | |
* | | | |
* +---+ +---+---+---+---+---+---+---+---+---+
* |4x4| |4x4| |2x2| |2x2| |2x2| |2x2|
* | +z| | -z| | +x| | +y| | +z| | -x| ...
* | | | | | | | | | | | |
* +---+ +---+ +---+ +---+ +---+ +---+
*
* The bottom row continues with the remaining 2x2 then the 1x1 mip contents
* in order, with each of them aligned to an 8x8 block boundary. Thus, for
* 32x32 cube maps and smaller, the bottom row layout is going to dictate the
* pitch of the tree. For a tree with 4x4 images, the pitch is at least
* 14 * 8 = 112 texels, for 2x2 it is at least 12 * 8 texels, and for 1x1
* it is 6 * 8 texels.
*/
static void
i945_miptree_layout_cube(struct intel_mipmap_tree * mt)
{
const GLuint dim = mt->physical_width0;
GLuint face;
GLuint lvlWidth = mt->physical_width0, lvlHeight = mt->physical_height0;
GLint level;
assert(lvlWidth == lvlHeight); /* cubemap images are square */
/* Depending on the size of the largest images, pitch can be
* determined either by the old-style packing of cubemap faces,
* or the final row of 4x4, 2x2 and 1x1 faces below this.
*/
if (dim > 32)
mt->total_width = dim * 2;
else
mt->total_width = 14 * 8;
if (dim >= 4)
mt->total_height = dim * 4 + 4;
else
mt->total_height = 4;
/* Set all the levels to effectively occupy the whole rectangular region. */
for (level = mt->first_level; level <= mt->last_level; level++) {
intel_miptree_set_level_info(mt, level,
0, 0,
lvlWidth, lvlHeight, 6);
lvlWidth /= 2;
lvlHeight /= 2;
}
for (face = 0; face < 6; face++) {
GLuint x = initial_offsets[face][0] * dim;
GLuint y = initial_offsets[face][1] * dim;
GLuint d = dim;
if (dim == 4 && face >= 4) {
y = mt->total_height - 4;
x = (face - 4) * 8;
} else if (dim < 4 && (face > 0 || mt->first_level > 0)) {
y = mt->total_height - 4;
x = face * 8;
}
for (level = mt->first_level; level <= mt->last_level; level++) {
intel_miptree_set_image_offset(mt, level, face, x, y);
d >>= 1;
switch (d) {
case 4:
switch (face) {
case FACE_POS_X:
case FACE_NEG_X:
x += step_offsets[face][0] * d;
y += step_offsets[face][1] * d;
break;
case FACE_POS_Y:
case FACE_NEG_Y:
y += 12;
x -= 8;
break;
case FACE_POS_Z:
case FACE_NEG_Z:
y = mt->total_height - 4;
x = (face - 4) * 8;
break;
}
break;
case 2:
y = mt->total_height - 4;
x = bottom_offsets[face];
break;
case 1:
x += 48;
break;
default:
x += step_offsets[face][0] * d;
y += step_offsets[face][1] * d;
break;
}
}
}
}
static void
i945_miptree_layout_3d(struct intel_mipmap_tree * mt)
{
GLuint width = mt->physical_width0;
GLuint height = mt->physical_height0;
GLuint depth = mt->physical_depth0;
GLuint pack_x_pitch, pack_x_nr;
GLuint pack_y_pitch;
GLuint level;
mt->total_width = mt->physical_width0;
mt->total_height = 0;
pack_y_pitch = MAX2(mt->physical_height0, 2);
pack_x_pitch = mt->total_width;
pack_x_nr = 1;
for (level = mt->first_level; level <= mt->last_level; level++) {
GLint x = 0;
GLint y = 0;
GLint q, j;
intel_miptree_set_level_info(mt, level,
0, mt->total_height,
width, height, depth);
for (q = 0; q < depth;) {
for (j = 0; j < pack_x_nr && q < depth; j++, q++) {
intel_miptree_set_image_offset(mt, level, q, x, y);
x += pack_x_pitch;
}
x = 0;
y += pack_y_pitch;
}
mt->total_height += y;
if (pack_x_pitch > 4) {
pack_x_pitch >>= 1;
pack_x_nr <<= 1;
assert(pack_x_pitch * pack_x_nr <= mt->total_width);
}
if (pack_y_pitch > 2) {
pack_y_pitch >>= 1;
}
width = minify(width, 1);
height = minify(height, 1);
depth = minify(depth, 1);
}
}
void
i945_miptree_layout(struct intel_mipmap_tree * mt)
{
switch (mt->target) {
case GL_TEXTURE_CUBE_MAP:
if (mt->compressed)
i945_miptree_layout_cube(mt);
else
i915_miptree_layout_cube(mt);
break;
case GL_TEXTURE_3D:
i945_miptree_layout_3d(mt);
break;
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
case GL_TEXTURE_RECTANGLE_ARB:
i945_miptree_layout_2d(mt);
break;
default:
_mesa_problem(NULL, "Unexpected tex target in i945_miptree_layout()");
break;
}
DBG("%s: %dx%dx%d\n", __func__,
mt->total_width, mt->total_height, mt->cpp);
}

View File

@ -1,448 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/mtypes.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/colormac.h"
#include "main/samplerobj.h"
#include "intel_mipmap_tree.h"
#include "intel_tex.h"
#include "i915_context.h"
#include "i915_reg.h"
static GLuint
translate_texture_format(mesa_format mesa_format, GLenum DepthMode)
{
switch (mesa_format) {
case MESA_FORMAT_L_UNORM8:
return MAPSURF_8BIT | MT_8BIT_L8;
case MESA_FORMAT_I_UNORM8:
return MAPSURF_8BIT | MT_8BIT_I8;
case MESA_FORMAT_A_UNORM8:
return MAPSURF_8BIT | MT_8BIT_A8;
case MESA_FORMAT_LA_UNORM8:
return MAPSURF_16BIT | MT_16BIT_AY88;
case MESA_FORMAT_B5G6R5_UNORM:
return MAPSURF_16BIT | MT_16BIT_RGB565;
case MESA_FORMAT_B5G5R5A1_UNORM:
return MAPSURF_16BIT | MT_16BIT_ARGB1555;
case MESA_FORMAT_B4G4R4A4_UNORM:
return MAPSURF_16BIT | MT_16BIT_ARGB4444;
case MESA_FORMAT_B8G8R8A8_SRGB:
case MESA_FORMAT_B8G8R8A8_UNORM:
return MAPSURF_32BIT | MT_32BIT_ARGB8888;
case MESA_FORMAT_B8G8R8X8_UNORM:
return MAPSURF_32BIT | MT_32BIT_XRGB8888;
case MESA_FORMAT_R8G8B8A8_UNORM:
return MAPSURF_32BIT | MT_32BIT_ABGR8888;
case MESA_FORMAT_YCBCR_REV:
return (MAPSURF_422 | MT_422_YCRCB_NORMAL);
case MESA_FORMAT_YCBCR:
return (MAPSURF_422 | MT_422_YCRCB_SWAPY);
case MESA_FORMAT_RGB_FXT1:
case MESA_FORMAT_RGBA_FXT1:
return (MAPSURF_COMPRESSED | MT_COMPRESS_FXT1);
case MESA_FORMAT_Z_UNORM16:
if (DepthMode == GL_ALPHA)
return (MAPSURF_16BIT | MT_16BIT_A16);
else if (DepthMode == GL_INTENSITY)
return (MAPSURF_16BIT | MT_16BIT_I16);
else
return (MAPSURF_16BIT | MT_16BIT_L16);
case MESA_FORMAT_RGBA_DXT1:
case MESA_FORMAT_RGB_DXT1:
case MESA_FORMAT_SRGB_DXT1:
case MESA_FORMAT_SRGBA_DXT1:
return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT1);
case MESA_FORMAT_RGBA_DXT3:
case MESA_FORMAT_SRGBA_DXT3:
return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT2_3);
case MESA_FORMAT_RGBA_DXT5:
case MESA_FORMAT_SRGBA_DXT5:
return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT4_5);
case MESA_FORMAT_Z24_UNORM_S8_UINT:
case MESA_FORMAT_Z24_UNORM_X8_UINT:
if (DepthMode == GL_ALPHA)
return (MAPSURF_32BIT | MT_32BIT_x8A24);
else if (DepthMode == GL_INTENSITY)
return (MAPSURF_32BIT | MT_32BIT_x8I24);
else
return (MAPSURF_32BIT | MT_32BIT_x8L24);
default:
fprintf(stderr, "%s: bad image format %s\n", __func__,
_mesa_get_format_name(mesa_format));
abort();
return 0;
}
}
/* The i915 (and related graphics cores) do not support GL_CLAMP. The
* Intel drivers for "other operating systems" implement GL_CLAMP as
* GL_CLAMP_TO_EDGE, so the same is done here.
*/
static GLuint
translate_wrap_mode(GLenum wrap)
{
switch (wrap) {
case GL_REPEAT:
return TEXCOORDMODE_WRAP;
case GL_CLAMP:
return TEXCOORDMODE_CLAMP_EDGE; /* not quite correct */
case GL_CLAMP_TO_EDGE:
return TEXCOORDMODE_CLAMP_EDGE;
case GL_CLAMP_TO_BORDER:
return TEXCOORDMODE_CLAMP_BORDER;
case GL_MIRRORED_REPEAT:
return TEXCOORDMODE_MIRROR;
default:
return TEXCOORDMODE_WRAP;
}
}
/* Recalculate all state from scratch. Perhaps not the most
* efficient, but this has gotten complex enough that we need
* something which is understandable and reliable.
*/
static bool
i915_update_tex_unit(struct intel_context *intel, GLuint unit, GLuint ss3)
{
struct gl_context *ctx = &intel->ctx;
struct i915_context *i915 = i915_context(ctx);
struct gl_texture_unit *tUnit = &ctx->Texture.Unit[unit];
struct gl_texture_object *tObj = tUnit->_Current;
struct intel_texture_object *intelObj = intel_texture_object(tObj);
struct gl_texture_image *firstImage;
struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
GLuint *state = i915->state.Tex[unit], format;
GLint lodbias, aniso = 0;
GLubyte border[4];
GLfloat maxlod;
memset(state, 0, sizeof(*state));
/* We need to refcount these. */
if (i915->state.tex_buffer[unit] != NULL) {
drm_intel_bo_unreference(i915->state.tex_buffer[unit]);
i915->state.tex_buffer[unit] = NULL;
}
if (!intel_finalize_mipmap_tree(intel, unit))
return false;
/* Get first image here, since intelObj->firstLevel will get set in
* the intel_finalize_mipmap_tree() call above.
*/
firstImage = tObj->Image[0][tObj->Attrib.BaseLevel];
drm_intel_bo_reference(intelObj->mt->region->bo);
i915->state.tex_buffer[unit] = intelObj->mt->region->bo;
i915->state.tex_offset[unit] = intelObj->mt->offset;
format = translate_texture_format(firstImage->TexFormat,
tObj->Attrib.DepthMode);
state[I915_TEXREG_MS3] =
(((firstImage->Height - 1) << MS3_HEIGHT_SHIFT) |
((firstImage->Width - 1) << MS3_WIDTH_SHIFT) | format);
if (intelObj->mt->region->tiling != I915_TILING_NONE) {
state[I915_TEXREG_MS3] |= MS3_TILED_SURFACE;
if (intelObj->mt->region->tiling == I915_TILING_Y)
state[I915_TEXREG_MS3] |= MS3_TILE_WALK;
}
/* We get one field with fraction bits for the maximum addressable
* (lowest resolution) LOD. Use it to cover both MAX_LEVEL and
* MAX_LOD.
*/
maxlod = MIN2(sampler->Attrib.MaxLod, tObj->_MaxLevel - tObj->Attrib.BaseLevel);
state[I915_TEXREG_MS4] =
((((intelObj->mt->region->pitch / 4) - 1) << MS4_PITCH_SHIFT) |
MS4_CUBE_FACE_ENA_MASK |
(U_FIXED(CLAMP(maxlod, 0.0, 11.0), 2) << MS4_MAX_LOD_SHIFT) |
((firstImage->Depth - 1) << MS4_VOLUME_DEPTH_SHIFT));
{
GLuint minFilt, mipFilt, magFilt;
switch (sampler->Attrib.MinFilter) {
case GL_NEAREST:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_NONE;
break;
case GL_LINEAR:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_NONE;
break;
case GL_NEAREST_MIPMAP_NEAREST:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_NEAREST;
break;
case GL_LINEAR_MIPMAP_NEAREST:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_NEAREST;
break;
case GL_NEAREST_MIPMAP_LINEAR:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_LINEAR;
break;
case GL_LINEAR_MIPMAP_LINEAR:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_LINEAR;
break;
default:
return false;
}
if (sampler->Attrib.MaxAnisotropy > 1.0) {
minFilt = FILTER_ANISOTROPIC;
magFilt = FILTER_ANISOTROPIC;
if (sampler->Attrib.MaxAnisotropy > 2.0)
aniso = SS2_MAX_ANISO_4;
else
aniso = SS2_MAX_ANISO_2;
}
else {
switch (sampler->Attrib.MagFilter) {
case GL_NEAREST:
magFilt = FILTER_NEAREST;
break;
case GL_LINEAR:
magFilt = FILTER_LINEAR;
break;
default:
return false;
}
}
lodbias = (int) ((tUnit->LodBias + sampler->Attrib.LodBias) * 16.0);
if (lodbias < -256)
lodbias = -256;
if (lodbias > 255)
lodbias = 255;
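/* e.g. a combined LOD bias of 1.5 becomes 24 here (1.5 * 16: 4 fraction
* bits); anything at or beyond +/-16.0 pins to the 9-bit
* two's-complement limits above.
*/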
state[I915_TEXREG_SS2] = ((lodbias << SS2_LOD_BIAS_SHIFT) &
SS2_LOD_BIAS_MASK);
/* YUV conversion:
*/
if (firstImage->TexFormat == MESA_FORMAT_YCBCR ||
firstImage->TexFormat == MESA_FORMAT_YCBCR_REV)
state[I915_TEXREG_SS2] |= SS2_COLORSPACE_CONVERSION;
/* Shadow:
*/
if (sampler->Attrib.CompareMode == GL_COMPARE_R_TO_TEXTURE_ARB &&
tObj->Target != GL_TEXTURE_3D) {
if (tObj->Target == GL_TEXTURE_1D)
return false;
state[I915_TEXREG_SS2] |=
(SS2_SHADOW_ENABLE |
intel_translate_shadow_compare_func(sampler->Attrib.CompareFunc));
minFilt = FILTER_4X4_FLAT;
magFilt = FILTER_4X4_FLAT;
}
state[I915_TEXREG_SS2] |= ((minFilt << SS2_MIN_FILTER_SHIFT) |
(mipFilt << SS2_MIP_FILTER_SHIFT) |
(magFilt << SS2_MAG_FILTER_SHIFT) |
aniso);
}
{
GLenum ws = sampler->Attrib.WrapS;
GLenum wt = sampler->Attrib.WrapT;
GLenum wr = sampler->Attrib.WrapR;
float minlod;
/* We program 1D textures as 2D textures, so the 2D texcoord could
* result in sampling border values if we don't set the T wrap to
* repeat.
*/
if (tObj->Target == GL_TEXTURE_1D)
wt = GL_REPEAT;
/* 3D textures don't seem to respect the border color.
* Fallback if there's ever a danger that they might refer to
* it.
*
 * Effectively this means falling back on 3D clamp or
 * clamp_to_border.
*/
if (tObj->Target == GL_TEXTURE_3D &&
(sampler->Attrib.MinFilter != GL_NEAREST ||
sampler->Attrib.MagFilter != GL_NEAREST) &&
(ws == GL_CLAMP ||
wt == GL_CLAMP ||
wr == GL_CLAMP ||
ws == GL_CLAMP_TO_BORDER ||
wt == GL_CLAMP_TO_BORDER || wr == GL_CLAMP_TO_BORDER))
return false;
/* Only support TEXCOORDMODE_CLAMP_EDGE and TEXCOORDMODE_CUBE (not
* used) when using cube map texture coordinates
*/
if (tObj->Target == GL_TEXTURE_CUBE_MAP_ARB &&
(((ws != GL_CLAMP) && (ws != GL_CLAMP_TO_EDGE)) ||
((wt != GL_CLAMP) && (wt != GL_CLAMP_TO_EDGE))))
return false;
/*
 * According to 3DSTATE_MAP_STATE on page 104 of Bspec
 * Vol3d 3D Instructions:
 *   [DevGDG and DevAlv]: Must be a power of 2 for cube maps.
 *   [DevLPT, DevCST and DevBLB]: If not a power of 2, cube maps
 *      must have all faces enabled.
 *
 * But, as tested on Pineview (DevBLB-derived), the rendering is
 * bad (the colors aren't sampled correctly in some fragments).
 * On inspection, the texture layout appears to be wrong: aligning
 * the width and height to 4 (although this doesn't make much
 * sense) fixes the issue but breaks other cases. The Bspec says
 * nothing about layout or alignment for NPOT cube maps; it
 * presumably just assumes a POT cube map.
 *
 * So we likely need to take this fallback on the other platforms
 * as well.
 */
if (tObj->Target == GL_TEXTURE_CUBE_MAP_ARB &&
!util_is_power_of_two_or_zero(firstImage->Height))
return false;
state[I915_TEXREG_SS3] = ss3; /* SS3_NORMALIZED_COORDS */
state[I915_TEXREG_SS3] |=
((translate_wrap_mode(ws) << SS3_TCX_ADDR_MODE_SHIFT) |
(translate_wrap_mode(wt) << SS3_TCY_ADDR_MODE_SHIFT) |
(translate_wrap_mode(wr) << SS3_TCZ_ADDR_MODE_SHIFT));
minlod = MIN2(sampler->Attrib.MinLod, tObj->_MaxLevel - tObj->Attrib.BaseLevel);
state[I915_TEXREG_SS3] |= (unit << SS3_TEXTUREMAP_INDEX_SHIFT);
state[I915_TEXREG_SS3] |= (U_FIXED(CLAMP(minlod, 0.0, 11.0), 4) <<
SS3_MIN_LOD_SHIFT);
}
if (sampler->Attrib.sRGBDecode == GL_DECODE_EXT &&
(_mesa_get_srgb_format_linear(firstImage->TexFormat) !=
firstImage->TexFormat)) {
state[I915_TEXREG_SS2] |= SS2_REVERSE_GAMMA_ENABLE;
}
/* convert border color from float to ubyte */
CLAMPED_FLOAT_TO_UBYTE(border[0], sampler->Attrib.state.border_color.f[0]);
CLAMPED_FLOAT_TO_UBYTE(border[1], sampler->Attrib.state.border_color.f[1]);
CLAMPED_FLOAT_TO_UBYTE(border[2], sampler->Attrib.state.border_color.f[2]);
CLAMPED_FLOAT_TO_UBYTE(border[3], sampler->Attrib.state.border_color.f[3]);
if (firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
/* GL specs that border color for depth textures is taken from the
* R channel, while the hardware uses A. Spam R into all the channels
* for safety.
*/
state[I915_TEXREG_SS4] = PACK_COLOR_8888(border[0],
border[0],
border[0],
border[0]);
} else {
state[I915_TEXREG_SS4] = PACK_COLOR_8888(border[3],
border[0],
border[1],
border[2]);
}
I915_ACTIVESTATE(i915, I915_UPLOAD_TEX(unit), true);
/* A memcmp-based no-change check was already disabled, and it
 * definitely wouldn't work here: the region may now change, and
 * that wouldn't be detected.
 */
I915_STATECHANGE(i915, I915_UPLOAD_TEX(unit));
#if 0
DBG(TEXTURE, "state[I915_TEXREG_SS2] = 0x%x\n", state[I915_TEXREG_SS2]);
DBG(TEXTURE, "state[I915_TEXREG_SS3] = 0x%x\n", state[I915_TEXREG_SS3]);
DBG(TEXTURE, "state[I915_TEXREG_SS4] = 0x%x\n", state[I915_TEXREG_SS4]);
DBG(TEXTURE, "state[I915_TEXREG_MS2] = 0x%x\n", state[I915_TEXREG_MS2]);
DBG(TEXTURE, "state[I915_TEXREG_MS3] = 0x%x\n", state[I915_TEXREG_MS3]);
DBG(TEXTURE, "state[I915_TEXREG_MS4] = 0x%x\n", state[I915_TEXREG_MS4]);
#endif
return true;
}
void
i915UpdateTextureState(struct intel_context *intel)
{
bool ok = true;
GLuint i;
for (i = 0; i < I915_TEX_UNITS && ok; i++) {
if (intel->ctx.Texture.Unit[i]._Current) {
switch (intel->ctx.Texture.Unit[i]._Current->Target) {
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
case GL_TEXTURE_CUBE_MAP:
case GL_TEXTURE_3D:
ok = i915_update_tex_unit(intel, i, SS3_NORMALIZED_COORDS);
break;
case GL_TEXTURE_RECTANGLE:
ok = i915_update_tex_unit(intel, i, 0);
break;
default:
ok = false;
break;
}
} else {
struct i915_context *i915 = i915_context(&intel->ctx);
if (i915->state.active & I915_UPLOAD_TEX(i))
I915_ACTIVESTATE(i915, I915_UPLOAD_TEX(i), false);
if (i915->state.tex_buffer[i] != NULL) {
drm_intel_bo_unreference(i915->state.tex_buffer[i]);
i915->state.tex_buffer[i] = NULL;
}
}
}
FALLBACK(intel, I915_FALLBACK_TEXTURE, !ok);
}

View File

@ -1,858 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/glheader.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/renderbuffer.h"
#include "main/framebuffer.h"
#include "tnl/tnl.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "swrast_setup/swrast_setup.h"
#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tris.h"
#include "intel_fbo.h"
#include "intel_buffers.h"
#include "i915_reg.h"
#include "i915_context.h"
static void
i915_render_prevalidate(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
i915ValidateFragmentProgram(i915);
}
static void
i915_render_start(struct intel_context *intel)
{
intel_prepare_render(intel);
}
static void
i915_reduced_primitive_state(struct intel_context *intel, GLenum rprim)
{
struct i915_context *i915 = i915_context(&intel->ctx);
GLuint st1 = i915->state.Stipple[I915_STPREG_ST1];
st1 &= ~ST1_ENABLE;
switch (rprim) {
case GL_QUADS: /* from RASTERIZE(GL_QUADS) in t_dd_tritemp.h */
case GL_TRIANGLES:
if (intel->ctx.Polygon.StippleFlag && intel->hw_stipple)
st1 |= ST1_ENABLE;
break;
case GL_LINES:
case GL_POINTS:
default:
break;
}
i915->intel.reduced_primitive = rprim;
if (st1 != i915->state.Stipple[I915_STPREG_ST1]) {
INTEL_FIREVERTICES(intel);
I915_STATECHANGE(i915, I915_UPLOAD_STIPPLE);
i915->state.Stipple[I915_STPREG_ST1] = st1;
}
}
/* Pull apart the vertex format registers and figure out how large a
* vertex is supposed to be.
*/
static bool
i915_check_vertex_size(struct intel_context *intel, GLuint expected)
{
struct i915_context *i915 = i915_context(&intel->ctx);
int lis2 = i915->state.Ctx[I915_CTXREG_LIS2];
int lis4 = i915->state.Ctx[I915_CTXREG_LIS4];
int i, sz = 0;
switch (lis4 & S4_VFMT_XYZW_MASK) {
case S4_VFMT_XY:
sz = 2;
break;
case S4_VFMT_XYZ:
sz = 3;
break;
case S4_VFMT_XYW:
sz = 3;
break;
case S4_VFMT_XYZW:
sz = 4;
break;
default:
fprintf(stderr, "no xyzw specified\n");
return 0;
}
if (lis4 & S4_VFMT_SPEC_FOG)
sz++;
if (lis4 & S4_VFMT_COLOR)
sz++;
if (lis4 & S4_VFMT_DEPTH_OFFSET)
sz++;
if (lis4 & S4_VFMT_POINT_WIDTH)
sz++;
if (lis4 & S4_VFMT_FOG_PARAM)
sz++;
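/* Worked example (hypothetical LIS2/LIS4 state): S4_VFMT_XYZW (4 dwords)
 * plus S4_VFMT_COLOR (1 dword) and a single TEXCOORDFMT_2D set counted
 * by the loop below (2 dwords) gives sz == 7.
 */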
for (i = 0; i < 8; i++) {
switch (lis2 & S2_TEXCOORD_FMT0_MASK) {
case TEXCOORDFMT_2D:
sz += 2;
break;
case TEXCOORDFMT_3D:
sz += 3;
break;
case TEXCOORDFMT_4D:
sz += 4;
break;
case TEXCOORDFMT_1D:
sz += 1;
break;
case TEXCOORDFMT_2D_16:
sz += 1;
break;
case TEXCOORDFMT_4D_16:
sz += 2;
break;
case TEXCOORDFMT_NOT_PRESENT:
break;
default:
fprintf(stderr, "bad texcoord fmt %d\n", i);
return false;
}
lis2 >>= S2_TEXCOORD_FMT1_SHIFT;
}
if (sz != expected)
fprintf(stderr, "vertex size mismatch %d/%d\n", sz, expected);
return sz == expected;
}
static void
i915_emit_invarient_state(struct intel_context *intel)
{
BATCH_LOCALS;
BEGIN_BATCH(15);
OUT_BATCH(_3DSTATE_AA_CMD |
AA_LINE_ECAAR_WIDTH_ENABLE |
AA_LINE_ECAAR_WIDTH_1_0 |
AA_LINE_REGION_WIDTH_ENABLE | AA_LINE_REGION_WIDTH_1_0);
OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
OUT_BATCH(0);
OUT_BATCH(_3DSTATE_DFLT_SPEC_CMD);
OUT_BATCH(0);
OUT_BATCH(_3DSTATE_DFLT_Z_CMD);
OUT_BATCH(0);
/* Don't support texture crossbar yet */
OUT_BATCH(_3DSTATE_COORD_SET_BINDINGS |
CSB_TCB(0, 0) |
CSB_TCB(1, 1) |
CSB_TCB(2, 2) |
CSB_TCB(3, 3) |
CSB_TCB(4, 4) | CSB_TCB(5, 5) | CSB_TCB(6, 6) | CSB_TCB(7, 7));
OUT_BATCH(_3DSTATE_SCISSOR_RECT_0_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
/* XXX: Use this */
OUT_BATCH(_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT);
OUT_BATCH(_3DSTATE_DEPTH_SUBRECT_DISABLE);
OUT_BATCH(_3DSTATE_LOAD_INDIRECT | 0); /* disable indirect state */
OUT_BATCH(0);
ADVANCE_BATCH();
}
#define emit(intel, state, size) \
intel_batchbuffer_data(intel, state, size)
static GLuint
get_dirty(struct i915_hw_state *state)
{
GLuint dirty;
/* Work around the multitex hang: if any texture unit's state is
 * modified, emit all texture units.
 */
dirty = state->active & ~state->emitted;
if (dirty & I915_UPLOAD_TEX_ALL)
state->emitted &= ~I915_UPLOAD_TEX_ALL;
dirty = state->active & ~state->emitted;
return dirty;
}
static GLuint
get_state_size(struct i915_hw_state *state)
{
GLuint dirty = get_dirty(state);
GLuint i;
GLuint sz = 0;
if (dirty & I915_UPLOAD_INVARIENT)
sz += 30 * 4;
if (dirty & I915_UPLOAD_RASTER_RULES)
sz += sizeof(state->RasterRules);
if (dirty & I915_UPLOAD_CTX)
sz += sizeof(state->Ctx);
if (dirty & I915_UPLOAD_BLEND)
sz += sizeof(state->Blend);
if (dirty & I915_UPLOAD_BUFFERS)
sz += sizeof(state->Buffer);
if (dirty & I915_UPLOAD_STIPPLE)
sz += sizeof(state->Stipple);
if (dirty & I915_UPLOAD_TEX_ALL) {
int nr = 0;
for (i = 0; i < I915_TEX_UNITS; i++)
if (dirty & I915_UPLOAD_TEX(i))
nr++;
sz += (2 + nr * 3) * sizeof(GLuint) * 2;
}
if (dirty & I915_UPLOAD_CONSTANTS)
sz += state->ConstantSize * sizeof(GLuint);
if (dirty & I915_UPLOAD_PROGRAM)
sz += state->ProgramSize * sizeof(GLuint);
return sz;
}
/* Push the state into the sarea and/or texture memory.
*/
static void
i915_emit_state(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
struct i915_hw_state *state = &i915->state;
int i, count, aper_count;
GLuint dirty;
drm_intel_bo *aper_array[3 + I915_TEX_UNITS];
GET_CURRENT_CONTEXT(ctx);
BATCH_LOCALS;
/* We don't hold the lock at this point, so want to make sure that
* there won't be a buffer wrap between the state emits and the primitive
* emit header.
*
* It might be better to talk about explicit places where
* scheduling is allowed, rather than assume that it is whenever a
* batchbuffer fills up.
*/
intel_batchbuffer_require_space(intel,
get_state_size(state) +
INTEL_PRIM_EMIT_SIZE);
count = 0;
again:
if (intel->batch.bo == NULL) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "i915 emit state");
assert(0);
}
aper_count = 0;
dirty = get_dirty(state);
aper_array[aper_count++] = intel->batch.bo;
if (dirty & I915_UPLOAD_BUFFERS) {
if (state->draw_region)
aper_array[aper_count++] = state->draw_region->bo;
if (state->depth_region)
aper_array[aper_count++] = state->depth_region->bo;
}
if (dirty & I915_UPLOAD_TEX_ALL) {
for (i = 0; i < I915_TEX_UNITS; i++) {
if (dirty & I915_UPLOAD_TEX(i)) {
if (state->tex_buffer[i]) {
aper_array[aper_count++] = state->tex_buffer[i];
}
}
}
}
if (dri_bufmgr_check_aperture_space(aper_array, aper_count)) {
if (count == 0) {
count++;
intel_batchbuffer_flush(intel);
goto again;
} else {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "i915 emit state");
assert(0);
}
}
/* work out list of buffers to emit */
/* Do this here as we may have flushed the batchbuffer above,
* causing more state to be dirty!
*/
dirty = get_dirty(state);
state->emitted |= dirty;
assert(get_dirty(state) == 0);
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "%s dirty: %x\n", __func__, dirty);
if (dirty & I915_UPLOAD_INVARIENT) {
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_INVARIENT:\n");
i915_emit_invarient_state(intel);
}
if (dirty & I915_UPLOAD_RASTER_RULES) {
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_RASTER_RULES:\n");
emit(intel, state->RasterRules, sizeof(state->RasterRules));
}
if (dirty & I915_UPLOAD_CTX) {
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_CTX:\n");
emit(intel, state->Ctx, sizeof(state->Ctx));
}
if (dirty & I915_UPLOAD_BLEND) {
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_BLEND:\n");
emit(intel, state->Blend, sizeof(state->Blend));
}
if (dirty & I915_UPLOAD_BUFFERS) {
GLuint count;
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_BUFFERS:\n");
count = 17;
if (state->Buffer[I915_DESTREG_DRAWRECT0] != MI_NOOP)
count++;
BEGIN_BATCH(count);
OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR0]);
OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR1]);
if (state->draw_region) {
OUT_RELOC(state->draw_region->bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
} else {
OUT_BATCH(0);
}
OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR0]);
OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR1]);
if (state->depth_region) {
OUT_RELOC(state->depth_region->bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
} else {
OUT_BATCH(0);
}
OUT_BATCH(state->Buffer[I915_DESTREG_DV0]);
OUT_BATCH(state->Buffer[I915_DESTREG_DV1]);
OUT_BATCH(state->Buffer[I915_DESTREG_SR0]);
OUT_BATCH(state->Buffer[I915_DESTREG_SR1]);
OUT_BATCH(state->Buffer[I915_DESTREG_SR2]);
OUT_BATCH(state->Buffer[I915_DESTREG_SENABLE]);
if (state->Buffer[I915_DESTREG_DRAWRECT0] != MI_NOOP)
OUT_BATCH(state->Buffer[I915_DESTREG_DRAWRECT0]);
OUT_BATCH(state->Buffer[I915_DESTREG_DRAWRECT1]);
OUT_BATCH(state->Buffer[I915_DESTREG_DRAWRECT2]);
OUT_BATCH(state->Buffer[I915_DESTREG_DRAWRECT3]);
OUT_BATCH(state->Buffer[I915_DESTREG_DRAWRECT4]);
OUT_BATCH(state->Buffer[I915_DESTREG_DRAWRECT5]);
ADVANCE_BATCH();
}
if (dirty & I915_UPLOAD_STIPPLE) {
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_STIPPLE:\n");
emit(intel, state->Stipple, sizeof(state->Stipple));
}
/* Combine all the dirty texture state into a single command to
* avoid lockups on I915 hardware.
*/
if (dirty & I915_UPLOAD_TEX_ALL) {
int nr = 0;
GLuint unwind;
for (i = 0; i < I915_TEX_UNITS; i++)
if (dirty & I915_UPLOAD_TEX(i))
nr++;
BEGIN_BATCH(2 + nr * 3);
OUT_BATCH(_3DSTATE_MAP_STATE | (3 * nr));
OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
for (i = 0; i < I915_TEX_UNITS; i++)
if (dirty & I915_UPLOAD_TEX(i)) {
OUT_RELOC(state->tex_buffer[i],
I915_GEM_DOMAIN_SAMPLER, 0,
state->tex_offset[i]);
OUT_BATCH(state->Tex[i][I915_TEXREG_MS3]);
OUT_BATCH(state->Tex[i][I915_TEXREG_MS4]);
}
ADVANCE_BATCH();
unwind = intel->batch.used;
BEGIN_BATCH(2 + nr * 3);
OUT_BATCH(_3DSTATE_SAMPLER_STATE | (3 * nr));
OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
for (i = 0; i < I915_TEX_UNITS; i++)
if (dirty & I915_UPLOAD_TEX(i)) {
OUT_BATCH(state->Tex[i][I915_TEXREG_SS2]);
OUT_BATCH(state->Tex[i][I915_TEXREG_SS3]);
OUT_BATCH(state->Tex[i][I915_TEXREG_SS4]);
}
ADVANCE_BATCH();
if (i915->last_sampler &&
memcmp(intel->batch.map + i915->last_sampler,
intel->batch.map + unwind,
(2 + nr*3)*sizeof(int)) == 0)
intel->batch.used = unwind;
else
i915->last_sampler = unwind;
}
if (dirty & I915_UPLOAD_CONSTANTS) {
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_CONSTANTS:\n");
emit(intel, state->Constant, state->ConstantSize * sizeof(GLuint));
}
if (dirty & I915_UPLOAD_PROGRAM) {
if (state->ProgramSize) {
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_PROGRAM:\n");
assert((state->Program[0] & 0x1ff) + 2 == state->ProgramSize);
emit(intel, state->Program, state->ProgramSize * sizeof(GLuint));
if (INTEL_DEBUG & DEBUG_STATE)
i915_disassemble_program(state->Program, state->ProgramSize);
}
}
assert(get_dirty(state) == 0);
}
static void
i915_destroy_context(struct intel_context *intel)
{
GLuint i;
struct i915_context *i915 = i915_context(&intel->ctx);
intel_region_release(&i915->state.draw_region);
intel_region_release(&i915->state.depth_region);
for (i = 0; i < I915_TEX_UNITS; i++) {
if (i915->state.tex_buffer[i] != NULL) {
drm_intel_bo_unreference(i915->state.tex_buffer[i]);
i915->state.tex_buffer[i] = NULL;
}
}
_tnl_free_vertices(&intel->ctx);
}
void
i915_set_buf_info_for_region(uint32_t *state, struct intel_region *region,
uint32_t buffer_id)
{
state[0] = _3DSTATE_BUF_INFO_CMD;
state[1] = buffer_id;
if (region != NULL) {
state[1] |= BUF_3D_PITCH(region->pitch);
if (region->tiling != I915_TILING_NONE) {
state[1] |= BUF_3D_TILED_SURFACE;
if (region->tiling == I915_TILING_Y)
state[1] |= BUF_3D_TILE_WALK_Y;
}
} else {
/* Fill in a default pitch, since 0 is invalid. We'll be
* setting the buffer offset to 0 and not referencing the
* buffer, so the pitch could really be any valid value.
*/
state[1] |= BUF_3D_PITCH(4096);
}
}
static uint32_t i915_render_target_format_for_mesa_format[MESA_FORMAT_COUNT] =
{
[MESA_FORMAT_B8G8R8A8_UNORM] = DV_PF_8888,
[MESA_FORMAT_B8G8R8X8_UNORM] = DV_PF_8888,
[MESA_FORMAT_B5G6R5_UNORM] = DV_PF_565 | DITHER_FULL_ALWAYS,
[MESA_FORMAT_B5G5R5A1_UNORM] = DV_PF_1555 | DITHER_FULL_ALWAYS,
[MESA_FORMAT_B4G4R4A4_UNORM] = DV_PF_4444 | DITHER_FULL_ALWAYS,
};
static bool
i915_render_target_supported(struct intel_context *intel,
struct gl_renderbuffer *rb)
{
mesa_format format = rb->Format;
if (format == MESA_FORMAT_Z24_UNORM_S8_UINT ||
format == MESA_FORMAT_Z24_UNORM_X8_UINT ||
format == MESA_FORMAT_Z_UNORM16) {
return true;
}
return i915_render_target_format_for_mesa_format[format] != 0;
}
static void
i915_set_draw_region(struct intel_context *intel,
struct intel_region *color_regions[],
struct intel_region *depth_region,
GLuint num_regions)
{
struct i915_context *i915 = i915_context(&intel->ctx);
struct gl_context *ctx = &intel->ctx;
struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[0];
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
struct gl_renderbuffer *drb;
struct intel_renderbuffer *idrb = NULL;
GLuint value;
struct i915_hw_state *state = &i915->state;
uint32_t draw_x, draw_y, draw_offset;
if (state->draw_region != color_regions[0]) {
intel_region_reference(&state->draw_region, color_regions[0]);
}
if (state->depth_region != depth_region) {
intel_region_reference(&state->depth_region, depth_region);
}
/*
* Set stride/cpp values
*/
i915_set_buf_info_for_region(&state->Buffer[I915_DESTREG_CBUFADDR0],
color_regions[0], BUF_3D_ID_COLOR_BACK);
i915_set_buf_info_for_region(&state->Buffer[I915_DESTREG_DBUFADDR0],
depth_region, BUF_3D_ID_DEPTH);
/*
* Compute/set I915_DESTREG_DV1 value
*/
value = (DSTORG_HORT_BIAS(0x8) | /* .5 */
DSTORG_VERT_BIAS(0x8) | /* .5 */
LOD_PRECLAMP_OGL | TEX_DEFAULT_COLOR_OGL);
if (irb != NULL) {
value |= i915_render_target_format_for_mesa_format[intel_rb_format(irb)];
} else {
value |= DV_PF_8888;
}
if (depth_region && depth_region->cpp == 4) {
value |= DEPTH_FRMT_24_FIXED_8_OTHER;
}
else {
value |= DEPTH_FRMT_16_FIXED;
}
state->Buffer[I915_DESTREG_DV1] = value;
drb = ctx->DrawBuffer->Attachment[BUFFER_DEPTH].Renderbuffer;
if (!drb)
drb = ctx->DrawBuffer->Attachment[BUFFER_STENCIL].Renderbuffer;
if (drb)
idrb = intel_renderbuffer(drb);
/* We set up the drawing rectangle to be offset into the color
* region's location in the miptree. If it doesn't match with
* depth's offsets, we can't render to it.
*
* (Well, not actually true -- the hw grew a bit to let depth's
* offset get forced to 0,0. We may want to use that if people are
* hitting that case. Also, some configurations may be supportable
* by tweaking the start offset of the buffers around, which we
* can't do in general due to tiling)
*/
FALLBACK(intel, I915_FALLBACK_DRAW_OFFSET,
idrb && irb && (idrb->draw_x != irb->draw_x ||
idrb->draw_y != irb->draw_y));
if (irb) {
draw_x = irb->draw_x;
draw_y = irb->draw_y;
} else if (idrb) {
draw_x = idrb->draw_x;
draw_y = idrb->draw_y;
} else {
draw_x = 0;
draw_y = 0;
}
draw_offset = (draw_y << 16) | draw_x;
FALLBACK(intel, I915_FALLBACK_DRAW_OFFSET,
(ctx->DrawBuffer->Width + draw_x > 2048) ||
(ctx->DrawBuffer->Height + draw_y > 2048));
/* When changing drawing rectangle offset, an MI_FLUSH is first required. */
if (draw_offset != i915->last_draw_offset) {
state->Buffer[I915_DESTREG_DRAWRECT0] = MI_FLUSH | INHIBIT_FLUSH_RENDER_CACHE;
i915->last_draw_offset = draw_offset;
} else
state->Buffer[I915_DESTREG_DRAWRECT0] = MI_NOOP;
state->Buffer[I915_DESTREG_DRAWRECT1] = _3DSTATE_DRAWRECT_INFO;
state->Buffer[I915_DESTREG_DRAWRECT2] = 0;
state->Buffer[I915_DESTREG_DRAWRECT3] = draw_offset;
state->Buffer[I915_DESTREG_DRAWRECT4] =
((ctx->DrawBuffer->Width + draw_x - 1) & 0xffff) |
((ctx->DrawBuffer->Height + draw_y - 1) << 16);
state->Buffer[I915_DESTREG_DRAWRECT5] = draw_offset;
I915_STATECHANGE(i915, I915_UPLOAD_BUFFERS);
}
static void
i915_update_color_write_enable(struct i915_context *i915, bool enable)
{
uint32_t dw = i915->state.Ctx[I915_CTXREG_LIS6];
if (enable)
dw |= S6_COLOR_WRITE_ENABLE;
else
dw &= ~S6_COLOR_WRITE_ENABLE;
if (dw != i915->state.Ctx[I915_CTXREG_LIS6]) {
I915_STATECHANGE(i915, I915_UPLOAD_CTX);
i915->state.Ctx[I915_CTXREG_LIS6] = dw;
}
}
/**
* Update the hardware state for drawing into a window or framebuffer object.
*
* Called by glDrawBuffer, glBindFramebufferEXT, MakeCurrent, and other
* places within the driver.
*
* Basically, this needs to be called any time the current framebuffer
* changes, the renderbuffers change, or we need to draw into different
* color buffers.
*/
static void
i915_update_draw_buffer(struct intel_context *intel)
{
struct i915_context *i915 = (struct i915_context *)intel;
struct gl_context *ctx = &intel->ctx;
struct gl_framebuffer *fb = ctx->DrawBuffer;
struct intel_region *colorRegion = NULL, *depthRegion = NULL;
struct intel_renderbuffer *irbDepth = NULL, *irbStencil = NULL;
if (!fb) {
/* this can happen during the initial context initialization */
return;
}
irbDepth = intel_get_renderbuffer(fb, BUFFER_DEPTH);
irbStencil = intel_get_renderbuffer(fb, BUFFER_STENCIL);
/* Do this here, not core Mesa, since this function is called from
* many places within the driver.
*/
if (ctx->NewState & _NEW_BUFFERS) {
/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
_mesa_update_framebuffer(ctx, ctx->ReadBuffer, ctx->DrawBuffer);
/* this updates the DrawBuffer's Width/Height if it's a FBO */
_mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
}
if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
/* this may occur when we're called by glBindFramebuffer() during
* the process of someone setting up renderbuffers, etc.
*/
/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
return;
}
/* How many color buffers are we drawing into?
*
* If there is more than one drawbuffer (GL_FRONT_AND_BACK), or the
 * drawbuffers are too big, we have to fall back to software.
*/
if ((fb->Width > ctx->Const.MaxRenderbufferSize)
|| (fb->Height > ctx->Const.MaxRenderbufferSize)) {
FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, true);
} else if (fb->_NumColorDrawBuffers > 1) {
FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, true);
} else {
struct intel_renderbuffer *irb;
irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
colorRegion = (irb && irb->mt) ? irb->mt->region : NULL;
FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, false);
}
/* Check for depth fallback. */
if (irbDepth && irbDepth->mt) {
FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, false);
depthRegion = irbDepth->mt->region;
} else if (irbDepth && !irbDepth->mt) {
FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, true);
depthRegion = NULL;
} else { /* !irbDepth */
/* No fallback is needed because there is no depth buffer. */
FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, false);
depthRegion = NULL;
}
/* Check for stencil fallback. */
if (irbStencil && irbStencil->mt) {
assert(intel_rb_format(irbStencil) == MESA_FORMAT_Z24_UNORM_S8_UINT);
FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, false);
} else if (irbStencil && !irbStencil->mt) {
FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, true);
} else { /* !irbStencil */
/* No fallback is needed because there is no stencil buffer. */
FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, false);
}
/* If we have a (packed) stencil buffer attached but no depth buffer,
* we still need to set up the shared depth/stencil state so we can use it.
*/
if (depthRegion == NULL && irbStencil && irbStencil->mt
&& intel_rb_format(irbStencil) == MESA_FORMAT_Z24_UNORM_S8_UINT) {
depthRegion = irbStencil->mt->region;
}
/*
* Update depth and stencil test state
*/
ctx->Driver.Enable(ctx, GL_DEPTH_TEST, ctx->Depth.Test);
ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
i915_update_color_write_enable(i915, colorRegion != NULL);
intel->vtbl.set_draw_region(intel, &colorRegion, depthRegion,
fb->_NumColorDrawBuffers);
intel->NewGLState |= _NEW_BUFFERS;
/* Set state we know depends on drawable parameters:
*/
intelCalcViewport(ctx);
ctx->Driver.Scissor(ctx);
/* Update culling direction which changes depending on the
* orientation of the buffer:
*/
ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
}
static void
i915_new_batch(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
/* Mark all state as needing to be emitted when starting a new batchbuffer.
* Using hardware contexts would be an alternative, but they have some
* difficulties associated with them (physical address requirements).
*/
i915->state.emitted = 0;
i915->last_draw_offset = 0;
i915->last_sampler = 0;
i915->current_vb_bo = NULL;
i915->current_vertex_size = 0;
}
static void
i915_assert_not_dirty( struct intel_context *intel )
{
struct i915_context *i915 = i915_context(&intel->ctx);
GLuint dirty = get_dirty(&i915->state);
assert(!dirty);
(void) dirty;
}
static void
i915_invalidate_state(struct intel_context *intel, GLuint new_state)
{
struct gl_context *ctx = &intel->ctx;
_swsetup_InvalidateState(ctx, new_state);
_tnl_InvalidateState(ctx, new_state);
_tnl_invalidate_vertex_state(ctx, new_state);
}
void
i915InitVtbl(struct i915_context *i915)
{
i915->intel.vtbl.check_vertex_size = i915_check_vertex_size;
i915->intel.vtbl.destroy = i915_destroy_context;
i915->intel.vtbl.emit_state = i915_emit_state;
i915->intel.vtbl.new_batch = i915_new_batch;
i915->intel.vtbl.reduced_primitive_state = i915_reduced_primitive_state;
i915->intel.vtbl.render_start = i915_render_start;
i915->intel.vtbl.render_prevalidate = i915_render_prevalidate;
i915->intel.vtbl.set_draw_region = i915_set_draw_region;
i915->intel.vtbl.update_draw_buffer = i915_update_draw_buffer;
i915->intel.vtbl.update_texture_state = i915UpdateTextureState;
i915->intel.vtbl.assert_not_dirty = i915_assert_not_dirty;
i915->intel.vtbl.finish_batch = intel_finish_vb;
i915->intel.vtbl.invalidate_state = i915_invalidate_state;
i915->intel.vtbl.render_target_supported = i915_render_target_supported;
}

View File

@ -1,265 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"
static void
intel_batchbuffer_reset(struct intel_context *intel);
void
intel_batchbuffer_init(struct intel_context *intel)
{
intel_batchbuffer_reset(intel);
intel->batch.cpu_map = malloc(intel->maxBatchSize);
intel->batch.map = intel->batch.cpu_map;
}
static void
intel_batchbuffer_reset(struct intel_context *intel)
{
if (intel->batch.last_bo != NULL) {
drm_intel_bo_unreference(intel->batch.last_bo);
intel->batch.last_bo = NULL;
}
intel->batch.last_bo = intel->batch.bo;
intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
intel->maxBatchSize, 4096);
intel->batch.reserved_space = BATCH_RESERVED;
intel->batch.used = 0;
}
void
intel_batchbuffer_free(struct intel_context *intel)
{
free(intel->batch.cpu_map);
drm_intel_bo_unreference(intel->batch.last_bo);
drm_intel_bo_unreference(intel->batch.bo);
}
static void
do_batch_dump(struct intel_context *intel)
{
struct drm_intel_decode *decode;
struct intel_batchbuffer *batch = &intel->batch;
int ret;
decode = drm_intel_decode_context_alloc(intel->intelScreen->deviceID);
if (!decode)
return;
ret = drm_intel_bo_map(batch->bo, false);
if (ret == 0) {
drm_intel_decode_set_batch_pointer(decode,
batch->bo->virtual,
batch->bo->offset,
batch->used);
} else {
fprintf(stderr,
"WARNING: failed to map batchbuffer (%s), "
"dumping uploaded data instead.\n", strerror(ret));
drm_intel_decode_set_batch_pointer(decode,
batch->map,
batch->bo->offset,
batch->used);
}
drm_intel_decode(decode);
drm_intel_decode_context_free(decode);
if (ret == 0) {
drm_intel_bo_unmap(batch->bo);
if (intel->vtbl.debug_batch != NULL)
intel->vtbl.debug_batch(intel);
}
}
/* TODO: Push this whole function into bufmgr.
*/
static int
do_flush_locked(struct intel_context *intel)
{
struct intel_batchbuffer *batch = &intel->batch;
int ret = 0;
ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
if (!intel->intelScreen->no_hw) {
if (ret == 0) {
if (unlikely(INTEL_DEBUG & DEBUG_AUB) && intel->vtbl.annotate_aub)
intel->vtbl.annotate_aub(intel);
ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
I915_EXEC_RENDER);
}
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
do_batch_dump(intel);
if (ret != 0) {
fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
exit(1);
}
intel->vtbl.new_batch(intel);
return ret;
}
int
_intel_batchbuffer_flush(struct intel_context *intel,
const char *file, int line)
{
int ret;
if (intel->batch.used == 0)
return 0;
if (intel->first_post_swapbuffers_batch == NULL) {
intel->first_post_swapbuffers_batch = intel->batch.bo;
drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
4*intel->batch.used);
intel->batch.reserved_space = 0;
if (intel->vtbl.finish_batch)
intel->vtbl.finish_batch(intel);
/* Mark the end of the buffer. */
intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
if (intel->batch.used & 1) {
/* Round batchbuffer usage to 2 DWORDs. */
intel_batchbuffer_emit_dword(intel, MI_NOOP);
}
intel_upload_finish(intel);
/* Check that we didn't just wrap our batchbuffer at a bad time. */
assert(!intel->no_batch_wrap);
ret = do_flush_locked(intel);
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
drm_intel_bo_wait_rendering(intel->batch.bo);
}
/* Reset the buffer:
*/
intel_batchbuffer_reset(intel);
return ret;
}
/* This is the only way buffers get added to the validate list.
*/
bool
intel_batchbuffer_emit_reloc(struct intel_context *intel,
drm_intel_bo *buffer,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta)
{
int ret;
ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
buffer, delta,
read_domains, write_domain);
assert(ret == 0);
(void)ret;
/*
* Using the old buffer offset, write in what the right data would be, in case
* the buffer doesn't move and we can short-circuit the relocation processing
* in the kernel
*/
intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
return true;
}
bool
intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
drm_intel_bo *buffer,
uint32_t read_domains,
uint32_t write_domain,
uint32_t delta)
{
int ret;
ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
buffer, delta,
read_domains, write_domain);
assert(ret == 0);
(void)ret;
/*
* Using the old buffer offset, write in what the right data would
* be, in case the buffer doesn't move and we can short-circuit the
* relocation processing in the kernel
*/
intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
return true;
}
void
intel_batchbuffer_data(struct intel_context *intel,
const void *data, GLuint bytes)
{
assert((bytes & 3) == 0);
intel_batchbuffer_require_space(intel, bytes);
memcpy(intel->batch.map + intel->batch.used, data, bytes);
intel->batch.used += bytes >> 2;
}
/* Emit a pipelined flush to either flush render and texture cache for
* reading from a FBO-drawn texture, or flush so that frontbuffer
* render appears on the screen in DRI1.
*
* This is also used for the always_flush_cache driconf debug option.
*/
void
intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
{
BEGIN_BATCH(1);
OUT_BATCH(MI_FLUSH);
ADVANCE_BATCH();
}

View File

@ -1,151 +0,0 @@
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H
#include "main/mtypes.h"
#include "intel_context.h"
#include "intel_bufmgr.h"
#include "intel_reg.h"
/**
* Number of bytes to reserve for commands necessary to complete a batch.
*
* This includes:
 * - MI_BATCH_BUFFER_END (4 bytes)
 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
 * - Any state emitted by vtbl->finish_batch():
 *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
 *
 * These sum to the 24 bytes reserved below.
 */
#define BATCH_RESERVED 24
struct intel_batchbuffer;
void intel_batchbuffer_init(struct intel_context *intel);
void intel_batchbuffer_free(struct intel_context *intel);
int _intel_batchbuffer_flush(struct intel_context *intel,
const char *file, int line);
#define intel_batchbuffer_flush(intel) \
_intel_batchbuffer_flush(intel, __FILE__, __LINE__)
/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
*/
void intel_batchbuffer_data(struct intel_context *intel,
const void *data, GLuint bytes);
bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
drm_intel_bo *buffer,
uint32_t read_domains,
uint32_t write_domain,
uint32_t offset);
bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
drm_intel_bo *buffer,
uint32_t read_domains,
uint32_t write_domain,
uint32_t offset);
void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
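/* Reinterpret a float's bits as a uint32_t. Punning through a union is
 * the strict-aliasing-safe way to do this in C; casting a float pointer
 * to a uint32_t pointer would be undefined behavior.
 */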
static inline uint32_t float_as_int(float f)
{
union {
float f;
uint32_t d;
} fi;
fi.f = f;
return fi.d;
}
/* Inline functions - might actually be better off with these
* non-inlined. Certainly better off switching all command packets to
* be passed as structs rather than dwords, but that's a little bit of
* work...
*/
static inline unsigned
intel_batchbuffer_space(struct intel_context *intel)
{
return (intel->batch.bo->size - intel->batch.reserved_space)
- intel->batch.used*4;
}
static inline void
intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
{
#ifdef DEBUG
assert(intel_batchbuffer_space(intel) >= 4);
#endif
intel->batch.map[intel->batch.used++] = dword;
}
static inline void
intel_batchbuffer_emit_float(struct intel_context *intel, float f)
{
intel_batchbuffer_emit_dword(intel, float_as_int(f));
}
static inline void
intel_batchbuffer_require_space(struct intel_context *intel,
GLuint sz)
{
#ifdef DEBUG
assert(sz < intel->maxBatchSize - BATCH_RESERVED);
#endif
if (intel_batchbuffer_space(intel) < sz)
intel_batchbuffer_flush(intel);
}
static inline void
intel_batchbuffer_begin(struct intel_context *intel, int n)
{
intel_batchbuffer_require_space(intel, n * 4);
intel->batch.emit = intel->batch.used;
#ifdef DEBUG
intel->batch.total = n;
#endif
}
static inline void
intel_batchbuffer_advance(struct intel_context *intel)
{
#ifdef DEBUG
struct intel_batchbuffer *batch = &intel->batch;
unsigned int _n = batch->used - batch->emit;
assert(batch->total != 0);
if (_n != batch->total) {
fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
_n, batch->total);
abort();
}
batch->total = 0;
#else
(void) intel;
#endif
}
/* Here are the crusty old macros, to be removed:
*/
#define BATCH_LOCALS
#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
intel_batchbuffer_emit_reloc(intel, buf, \
read_domains, write_domain, delta); \
} while (0)
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do { \
intel_batchbuffer_emit_reloc_fenced(intel, buf, \
read_domains, write_domain, delta); \
} while (0)
#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);
#endif

View File

@ -1,671 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/mtypes.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/colormac.h"
#include "main/fbobject.h"
#include "intel_blit.h"
#include "intel_buffers.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_reg.h"
#include "intel_regions.h"
#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#define FILE_DEBUG_FLAG DEBUG_BLIT
static void
intel_miptree_set_alpha_to_one(struct intel_context *intel,
struct intel_mipmap_tree *mt,
int x, int y, int width, int height);
static GLuint translate_raster_op(enum gl_logicop_mode logicop)
{
return logicop | (logicop << 4);
}
static uint32_t
br13_for_cpp(int cpp)
{
switch (cpp) {
case 4:
return BR13_8888;
break;
case 2:
return BR13_565;
break;
case 1:
return BR13_8;
break;
default:
assert(0);
return 0;
}
}
/* Copy BitBlt
*/
static bool
emit_copy_blit(struct intel_context *intel,
GLuint cpp,
GLshort src_pitch,
drm_intel_bo *src_buffer,
GLuint src_offset,
uint32_t src_tiling,
GLshort dst_pitch,
drm_intel_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort src_x, GLshort src_y,
GLshort dst_x, GLshort dst_y,
GLshort w, GLshort h,
enum gl_logicop_mode logic_op)
{
GLuint CMD, BR13, pass = 0;
int dst_y2 = dst_y + h;
int dst_x2 = dst_x + w;
drm_intel_bo *aper_array[3];
bool dst_y_tiled = dst_tiling == I915_TILING_Y;
bool src_y_tiled = src_tiling == I915_TILING_Y;
BATCH_LOCALS;
if (dst_tiling != I915_TILING_NONE) {
if (dst_offset & 4095)
return false;
}
if (src_tiling != I915_TILING_NONE) {
if (src_offset & 4095)
return false;
}
if (dst_y_tiled || src_y_tiled)
return false;
/* do space check before going any further */
do {
aper_array[0] = intel->batch.bo;
aper_array[1] = dst_buffer;
aper_array[2] = src_buffer;
if (dri_bufmgr_check_aperture_space(aper_array, 3) != 0) {
intel_batchbuffer_flush(intel);
pass++;
} else
break;
} while (pass < 2);
if (pass >= 2)
return false;
intel_batchbuffer_require_space(intel, 8 * 4);
DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
__func__,
src_buffer, src_pitch, src_offset, src_x, src_y,
dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);
/* Blit pitch must be dword-aligned. Otherwise, the hardware appears to drop
* the low bits. Offsets must be naturally aligned.
*/
if (src_pitch % 4 != 0 || src_offset % cpp != 0 ||
dst_pitch % 4 != 0 || dst_offset % cpp != 0)
return false;
/* For big formats (such as floating point), do the copy using 16 or 32bpp
* and multiply the coordinates.
*/
if (cpp > 4) {
if (cpp % 4 == 2) {
dst_x *= cpp / 2;
dst_x2 *= cpp / 2;
src_x *= cpp / 2;
cpp = 2;
} else {
assert(cpp % 4 == 0);
dst_x *= cpp / 4;
dst_x2 *= cpp / 4;
src_x *= cpp / 4;
cpp = 4;
}
}
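/* e.g. a hypothetical 16-byte RGBA32F texel is copied as four 4-byte
 * pixels: the x coordinates are scaled by 4 and cpp drops to 4, putting
 * the blit geometry back into units the hardware supports.
 */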
BR13 = br13_for_cpp(cpp) | translate_raster_op(logic_op) << 16;
switch (cpp) {
case 1:
case 2:
CMD = XY_SRC_COPY_BLT_CMD;
break;
case 4:
CMD = XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
break;
default:
return false;
}
if (dst_y2 <= dst_y || dst_x2 <= dst_x) {
return true;
}
assert(dst_x < dst_x2);
assert(dst_y < dst_y2);
BEGIN_BATCH(8);
OUT_BATCH(CMD | (8 - 2));
OUT_BATCH(BR13 | (uint16_t)dst_pitch);
OUT_BATCH((dst_y << 16) | dst_x);
OUT_BATCH((dst_y2 << 16) | dst_x2);
OUT_RELOC_FENCED(dst_buffer,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
dst_offset);
OUT_BATCH((src_y << 16) | src_x);
OUT_BATCH((uint16_t)src_pitch);
OUT_RELOC_FENCED(src_buffer,
I915_GEM_DOMAIN_RENDER, 0,
src_offset);
ADVANCE_BATCH();
intel_batchbuffer_emit_mi_flush(intel);
return true;
}
/**
* Implements a rectangular block transfer (blit) of pixels between two
* miptrees.
*
* Our blitter can operate on 1, 2, or 4-byte-per-pixel data, with generous,
* but limited, pitches and sizes allowed.
*
* The src/dst coordinates are relative to the given level/slice of the
* miptree.
*
* If @src_flip or @dst_flip is set, then the rectangle within that miptree
* will be inverted (including scanline order) when copying. This is common
* in GL when copying between window system and user-created
* renderbuffers/textures.
*/
bool
intel_miptree_blit(struct intel_context *intel,
struct intel_mipmap_tree *src_mt,
int src_level, int src_slice,
uint32_t src_x, uint32_t src_y, bool src_flip,
struct intel_mipmap_tree *dst_mt,
int dst_level, int dst_slice,
uint32_t dst_x, uint32_t dst_y, bool dst_flip,
uint32_t width, uint32_t height,
enum gl_logicop_mode logicop)
{
/* No sRGB decode or encode is done by the hardware blitter, which is
* consistent with what we want in the callers (glCopyTexSubImage(),
* glBlitFramebuffer(), texture validation, etc.).
*/
mesa_format src_format = _mesa_get_srgb_format_linear(src_mt->format);
mesa_format dst_format = _mesa_get_srgb_format_linear(dst_mt->format);
/* The blitter doesn't support format conversions. The two exceptions we
 * allow are blitting ARGB8888 to XRGB8888 (trivial: the values dropped
 * into the X channel don't matter) and XRGB8888 to ARGB8888 (handled by
 * setting the A channel to 1.0 at the end).
*/
if (src_format != dst_format &&
((src_format != MESA_FORMAT_B8G8R8A8_UNORM &&
src_format != MESA_FORMAT_B8G8R8X8_UNORM) ||
(dst_format != MESA_FORMAT_B8G8R8A8_UNORM &&
dst_format != MESA_FORMAT_B8G8R8X8_UNORM))) {
perf_debug("%s: Can't use hardware blitter from %s to %s, "
"falling back.\n", __func__,
_mesa_get_format_name(src_format),
_mesa_get_format_name(dst_format));
return false;
}
/* According to the Ivy Bridge PRM, Vol1 Part4, section 1.2.1.2 (Graphics
* Data Size Limitations):
*
* The BLT engine is capable of transferring very large quantities of
* graphics data. Any graphics data read from and written to the
* destination is permitted to represent a number of pixels that
* occupies up to 65,536 scan lines and up to 32,768 bytes per scan line
* at the destination. The maximum number of pixels that may be
* represented per scan lines worth of graphics data depends on the
* color depth.
*
* Furthermore, emit_copy_blit (which is called below) uses a signed
* 16-bit integer to represent buffer pitch, so it can only handle buffer
* pitches < 32k.
*
* As a result of these two limitations, we can only use the blitter to do
* this copy when the region's pitch is less than 32k.
*/
if (src_mt->region->pitch > 32768 ||
dst_mt->region->pitch > 32768) {
perf_debug("Falling back due to >32k pitch\n");
return false;
}
if (src_flip)
src_y = src_mt->level[src_level].height - src_y - height;
if (dst_flip)
dst_y = dst_mt->level[dst_level].height - dst_y - height;
int src_pitch = src_mt->region->pitch;
if (src_flip != dst_flip)
src_pitch = -src_pitch;
uint32_t src_image_x, src_image_y;
intel_miptree_get_image_offset(src_mt, src_level, src_slice,
&src_image_x, &src_image_y);
src_x += src_image_x;
src_y += src_image_y;
uint32_t dst_image_x, dst_image_y;
intel_miptree_get_image_offset(dst_mt, dst_level, dst_slice,
&dst_image_x, &dst_image_y);
dst_x += dst_image_x;
dst_y += dst_image_y;
if (!emit_copy_blit(intel,
src_mt->cpp,
src_pitch,
src_mt->region->bo, src_mt->offset,
src_mt->region->tiling,
dst_mt->region->pitch,
dst_mt->region->bo, dst_mt->offset,
dst_mt->region->tiling,
src_x, src_y,
dst_x, dst_y,
width, height,
logicop)) {
return false;
}
if (src_mt->format == MESA_FORMAT_B8G8R8X8_UNORM &&
dst_mt->format == MESA_FORMAT_B8G8R8A8_UNORM) {
intel_miptree_set_alpha_to_one(intel, dst_mt,
dst_x, dst_y,
width, height);
}
return true;
}
/**
* Use blitting to clear the renderbuffers named by 'flags'.
* Note: we can't use the ctx->DrawBuffer->_ColorDrawBufferIndexes field
* since that might include software renderbuffers or renderbuffers
* which we're clearing with triangles.
* \param mask bitmask of BUFFER_BIT_* values indicating buffers to clear
*/
GLbitfield
intelClearWithBlit(struct gl_context *ctx, GLbitfield mask)
{
struct intel_context *intel = intel_context(ctx);
struct gl_framebuffer *fb = ctx->DrawBuffer;
GLuint clear_depth_value, clear_depth_mask;
GLint cx, cy, cw, ch;
GLbitfield fail_mask = 0;
BATCH_LOCALS;
/* Note: we don't use this function on Gen7+ hardware, so we can safely
* ignore fast color clear issues.
*/
assert(intel->gen < 7);
/*
* Compute values for clearing the buffers.
*/
clear_depth_value = 0;
clear_depth_mask = 0;
if (mask & BUFFER_BIT_DEPTH) {
clear_depth_value = (GLuint) (fb->_DepthMax * ctx->Depth.Clear);
clear_depth_mask = XY_BLT_WRITE_RGB;
}
if (mask & BUFFER_BIT_STENCIL) {
clear_depth_value |= (ctx->Stencil.Clear & 0xff) << 24;
clear_depth_mask |= XY_BLT_WRITE_ALPHA;
}
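/* For a packed Z24_S8 buffer this leaves depth in the low 24 bits and
 * stencil in the top byte; the XY_BLT_WRITE_RGB/ALPHA masks chosen above
 * then select which sub-field the clear blit actually writes.
 */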
cx = fb->_Xmin;
if (_mesa_is_winsys_fbo(fb))
cy = ctx->DrawBuffer->Height - fb->_Ymax;
else
cy = fb->_Ymin;
cw = fb->_Xmax - fb->_Xmin;
ch = fb->_Ymax - fb->_Ymin;
if (cw == 0 || ch == 0)
return 0;
/* Loop over all renderbuffers */
mask &= (1 << BUFFER_COUNT) - 1;
while (mask) {
GLuint buf = ffs(mask) - 1;
bool is_depth_stencil = buf == BUFFER_DEPTH || buf == BUFFER_STENCIL;
struct intel_renderbuffer *irb;
int x1, y1, x2, y2;
uint32_t clear_val;
uint32_t BR13, CMD;
struct intel_region *region;
int pitch, cpp;
drm_intel_bo *aper_array[2];
mask &= ~(1 << buf);
irb = intel_get_renderbuffer(fb, buf);
if (irb && irb->mt) {
region = irb->mt->region;
assert(region);
assert(region->bo);
} else {
fail_mask |= 1 << buf;
continue;
}
/* OK, clear this renderbuffer */
x1 = cx + irb->draw_x;
y1 = cy + irb->draw_y;
x2 = cx + cw + irb->draw_x;
y2 = cy + ch + irb->draw_y;
pitch = region->pitch;
cpp = region->cpp;
DBG("%s dst:buf(%p)/%d %d,%d sz:%dx%d\n",
__func__,
region->bo, pitch,
x1, y1, x2 - x1, y2 - y1);
BR13 = 0xf0 << 16;
CMD = XY_COLOR_BLT_CMD;
/* Setup the blit command */
if (cpp == 4) {
if (is_depth_stencil) {
CMD |= clear_depth_mask;
} else {
/* clearing RGBA */
CMD |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
}
}
assert(region->tiling != I915_TILING_Y);
BR13 |= pitch;
if (is_depth_stencil) {
clear_val = clear_depth_value;
} else {
uint8_t clear[4];
GLfloat *color = ctx->Color.ClearColor.f;
_mesa_unclamped_float_rgba_to_ubyte(clear, color);
switch (intel_rb_format(irb)) {
case MESA_FORMAT_B8G8R8A8_UNORM:
case MESA_FORMAT_B8G8R8X8_UNORM:
clear_val = PACK_COLOR_8888(clear[3], clear[0],
clear[1], clear[2]);
break;
case MESA_FORMAT_B5G6R5_UNORM:
clear_val = PACK_COLOR_565(clear[0], clear[1], clear[2]);
break;
case MESA_FORMAT_B4G4R4A4_UNORM:
clear_val = PACK_COLOR_4444(clear[3], clear[0],
clear[1], clear[2]);
break;
case MESA_FORMAT_B5G5R5A1_UNORM:
clear_val = PACK_COLOR_1555(clear[3], clear[0],
clear[1], clear[2]);
break;
case MESA_FORMAT_A_UNORM8:
clear_val = PACK_COLOR_8888(clear[3], clear[3],
clear[3], clear[3]);
break;
default:
fail_mask |= 1 << buf;
continue;
}
}
BR13 |= br13_for_cpp(cpp);
assert(x1 < x2);
assert(y1 < y2);
/* do space check before going any further */
aper_array[0] = intel->batch.bo;
aper_array[1] = region->bo;
if (drm_intel_bufmgr_check_aperture_space(aper_array,
ARRAY_SIZE(aper_array)) != 0) {
intel_batchbuffer_flush(intel);
}
BEGIN_BATCH(6);
OUT_BATCH(CMD | (6 - 2));
OUT_BATCH(BR13);
OUT_BATCH((y1 << 16) | x1);
OUT_BATCH((y2 << 16) | x2);
OUT_RELOC_FENCED(region->bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
0);
OUT_BATCH(clear_val);
ADVANCE_BATCH();
if (intel->always_flush_cache)
intel_batchbuffer_emit_mi_flush(intel);
if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL)
mask &= ~(BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL);
}
return fail_mask;
}
bool
intelEmitImmediateColorExpandBlit(struct intel_context *intel,
GLuint cpp,
GLubyte *src_bits, GLuint src_size,
GLuint fg_color,
GLshort dst_pitch,
drm_intel_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort x, GLshort y,
GLshort w, GLshort h,
enum gl_logicop_mode logic_op)
{
int dwords = ALIGN(src_size, 8) / 4;
uint32_t opcode, br13, blit_cmd;
if (dst_tiling != I915_TILING_NONE) {
if (dst_offset & 4095)
return false;
if (dst_tiling == I915_TILING_Y)
return false;
}
assert((unsigned)logic_op <= 0x0f);
assert(dst_pitch > 0);
if (w < 0 || h < 0)
return true;
DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d, %d bytes %d dwords\n",
__func__,
dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);
intel_batchbuffer_require_space(intel,
(8 * 4) +
(3 * 4) +
dwords * 4);
opcode = XY_SETUP_BLT_CMD;
if (cpp == 4)
opcode |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
br13 = dst_pitch | (translate_raster_op(logic_op) << 16) | (1 << 29);
br13 |= br13_for_cpp(cpp);
blit_cmd = XY_TEXT_IMMEDIATE_BLIT_CMD | XY_TEXT_BYTE_PACKED; /* packing? */
if (dst_tiling != I915_TILING_NONE)
blit_cmd |= XY_DST_TILED;
BEGIN_BATCH(8 + 3);
OUT_BATCH(opcode | (8 - 2));
OUT_BATCH(br13);
OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
OUT_BATCH((100 << 16) | 100); /* clip x2, y2 */
OUT_RELOC_FENCED(dst_buffer,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
dst_offset);
OUT_BATCH(0); /* bg */
OUT_BATCH(fg_color); /* fg */
OUT_BATCH(0); /* pattern base addr */
OUT_BATCH(blit_cmd | ((3 - 2) + dwords));
OUT_BATCH((y << 16) | x);
OUT_BATCH(((y + h) << 16) | (x + w));
ADVANCE_BATCH();
intel_batchbuffer_data(intel, src_bits, dwords * 4);
intel_batchbuffer_emit_mi_flush(intel);
return true;
}
/* We don't have a memmove-type blit like some other hardware, so we do a
 * rectangular blit covering most of the space, then emit a 1-scanline blit
 * at the end to cover the remainder if needed.
*/
void
intel_emit_linear_blit(struct intel_context *intel,
drm_intel_bo *dst_bo,
unsigned int dst_offset,
drm_intel_bo *src_bo,
unsigned int src_offset,
unsigned int size)
{
struct gl_context *ctx = &intel->ctx;
GLuint pitch, height;
bool ok;
/* The pitch given to the GPU must be DWORD aligned, and
 * we want width to match pitch. Max width is (1 << 15) - 1;
 * rounding that down to the nearest DWORD gives (1 << 15) - 4.
*/
pitch = ROUND_DOWN_TO(MIN2(size, (1 << 15) - 1), 4);
height = (pitch == 0) ? 1 : size / pitch;
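/* Worked example: for a hypothetical 100000-byte copy, pitch becomes
 * ROUND_DOWN_TO(MIN2(100000, 32767), 4) == 32764 and height == 3,
 * leaving 100000 - 3 * 32764 == 1708 bytes for the final one-scanline
 * blit below.
 */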
ok = emit_copy_blit(intel, 1,
pitch, src_bo, src_offset, I915_TILING_NONE,
pitch, dst_bo, dst_offset, I915_TILING_NONE,
0, 0, /* src x/y */
0, 0, /* dst x/y */
pitch, height, /* w, h */
COLOR_LOGICOP_COPY);
if (!ok)
_mesa_problem(ctx, "Failed to linear blit %dx%d\n", pitch, height);
src_offset += pitch * height;
dst_offset += pitch * height;
size -= pitch * height;
assert (size < (1 << 15));
pitch = ALIGN(size, 4);
if (size != 0) {
ok = emit_copy_blit(intel, 1,
pitch, src_bo, src_offset, I915_TILING_NONE,
pitch, dst_bo, dst_offset, I915_TILING_NONE,
0, 0, /* src x/y */
0, 0, /* dst x/y */
size, 1, /* w, h */
COLOR_LOGICOP_COPY);
if (!ok)
_mesa_problem(ctx, "Failed to linear blit %dx%d\n", size, 1);
}
}
/**
* Used to initialize the alpha value of an ARGB8888 miptree after copying
* into it from an XRGB8888 source.
*
* This is very common with glCopyTexImage2D(). Note that the coordinates are
* relative to the start of the miptree, not relative to a slice within the
* miptree.
*/
static void
intel_miptree_set_alpha_to_one(struct intel_context *intel,
struct intel_mipmap_tree *mt,
int x, int y, int width, int height)
{
struct intel_region *region = mt->region;
uint32_t BR13, CMD;
int pitch, cpp;
drm_intel_bo *aper_array[2];
BATCH_LOCALS;
pitch = region->pitch;
cpp = region->cpp;
DBG("%s dst:buf(%p)/%d %d,%d sz:%dx%d\n",
__func__, region->bo, pitch, x, y, width, height);
BR13 = br13_for_cpp(cpp) | 0xf0 << 16;
CMD = XY_COLOR_BLT_CMD;
CMD |= XY_BLT_WRITE_ALPHA;
BR13 |= pitch;
/* do space check before going any further */
aper_array[0] = intel->batch.bo;
aper_array[1] = region->bo;
if (drm_intel_bufmgr_check_aperture_space(aper_array,
ARRAY_SIZE(aper_array)) != 0) {
intel_batchbuffer_flush(intel);
}
BEGIN_BATCH(6);
OUT_BATCH(CMD | (6 - 2));
OUT_BATCH(BR13);
OUT_BATCH((y << 16) | x);
OUT_BATCH(((y + height) << 16) | (x + width));
OUT_RELOC_FENCED(region->bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
0);
OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
ADVANCE_BATCH();
intel_batchbuffer_emit_mi_flush(intel);
}

View File

@ -1,67 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_BLIT_H
#define INTEL_BLIT_H
#include "intel_context.h"
extern void intelCopyBuffer(const __DRIdrawable * dpriv,
const drm_clip_rect_t * rect);
extern GLbitfield intelClearWithBlit(struct gl_context * ctx, GLbitfield mask);
bool intel_miptree_blit(struct intel_context *intel,
struct intel_mipmap_tree *src_mt,
int src_level, int src_slice,
uint32_t src_x, uint32_t src_y, bool src_flip,
struct intel_mipmap_tree *dst_mt,
int dst_level, int dst_slice,
uint32_t dst_x, uint32_t dst_y, bool dst_flip,
uint32_t width, uint32_t height,
enum gl_logicop_mode logicop);
bool
intelEmitImmediateColorExpandBlit(struct intel_context *intel,
GLuint cpp,
GLubyte *src_bits, GLuint src_size,
GLuint fg_color,
GLshort dst_pitch,
drm_intel_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort x, GLshort y,
GLshort w, GLshort h,
enum gl_logicop_mode logic_op);
void intel_emit_linear_blit(struct intel_context *intel,
drm_intel_bo *dst_bo,
unsigned int dst_offset,
drm_intel_bo *src_bo,
unsigned int src_offset,
unsigned int size);
#endif

View File

@ -1,804 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"
#include "util/u_memory.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
gl_map_buffer_index index);
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
struct intel_buffer_object *intel_obj)
{
intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
intel_obj->Base.Size, 64);
}
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
drm_intel_bo_unreference(intel_obj->buffer);
intel_obj->buffer = NULL;
intel_obj->offset = 0;
intel_obj->source = 0;
}
/**
* There is some duplication between mesa's bufferobjects and our
* bufmgr buffers. Both have an integer handle and a hashtable to
* lookup an opaque structure. It would be nice if the handles and
* internal structure were somehow shared.
*/
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name)
{
struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
_mesa_initialize_buffer_object(ctx, &obj->Base, name);
obj->buffer = NULL;
return &obj->Base;
}
/**
* Deallocate/free a vertex/pixel buffer object.
* Called via glDeleteBuffersARB().
*/
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
/* Per the spec, buffer objects are automatically unmapped when they are
* deleted, but Mesa doesn't call UnmapBuffer for us at context destroy
* (though it does if you call glDeleteBuffers).
*/
_mesa_buffer_unmap_all_mappings(ctx, obj);
align_free(intel_obj->sys_buffer);
drm_intel_bo_unreference(intel_obj->buffer);
_mesa_delete_buffer_object(ctx, obj);
}
/**
* Allocate space for and store data in a buffer object. Any data that was
* previously stored in the buffer object is lost. If data is NULL,
* memory will be allocated, but no copy will occur.
* Called via ctx->Driver.BufferData().
* \return true for success, false if out of memory
*/
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
GLenum target,
GLsizeiptrARB size,
const GLvoid * data,
GLenum usage,
GLbitfield storageFlags,
struct gl_buffer_object *obj)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
intel_obj->Base.Size = size;
intel_obj->Base.Usage = usage;
intel_obj->Base.StorageFlags = storageFlags;
assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
assert(!obj->Mappings[MAP_INTERNAL].Pointer);
if (intel_obj->buffer != NULL)
release_buffer(intel_obj);
align_free(intel_obj->sys_buffer);
intel_obj->sys_buffer = NULL;
if (size != 0) {
/* Stick VBOs in system memory, as we're always doing swtnl with their
* contents anyway.
*/
if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
intel_obj->sys_buffer =
align_malloc(size, ctx->Const.MinMapBufferAlignment);
if (intel_obj->sys_buffer != NULL) {
if (data != NULL)
memcpy(intel_obj->sys_buffer, data, size);
return true;
}
}
intel_bufferobj_alloc_buffer(intel, intel_obj);
if (!intel_obj->buffer)
return false;
if (data != NULL)
drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
}
return true;
}
/**
* Replace data in a subrange of buffer object. If the data range
* specified by size + offset extends beyond the end of the buffer or
* if data is NULL, no copy is performed.
* Called via glBufferSubDataARB().
*/
static void
intel_bufferobj_subdata(struct gl_context * ctx,
GLintptrARB offset,
GLsizeiptrARB size,
const GLvoid * data, struct gl_buffer_object *obj)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
bool busy;
if (size == 0)
return;
assert(intel_obj);
/* If we have a single copy in system memory, update that */
if (intel_obj->sys_buffer) {
if (intel_obj->source)
release_buffer(intel_obj);
if (intel_obj->buffer == NULL) {
memcpy((char *)intel_obj->sys_buffer + offset, data, size);
return;
}
align_free(intel_obj->sys_buffer);
intel_obj->sys_buffer = NULL;
}
/* Otherwise we need to update the copy in video memory. */
busy =
drm_intel_bo_busy(intel_obj->buffer) ||
drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);
if (busy) {
if (size == intel_obj->Base.Size) {
/* Replace the current busy bo with fresh data. */
drm_intel_bo_unreference(intel_obj->buffer);
intel_bufferobj_alloc_buffer(intel, intel_obj);
drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
} else {
perf_debug("Using a blit copy to avoid stalling on %ldb "
"glBufferSubData() to a busy buffer object.\n",
(long)size);
drm_intel_bo *temp_bo =
drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);
drm_intel_bo_subdata(temp_bo, 0, size, data);
intel_emit_linear_blit(intel,
intel_obj->buffer, offset,
temp_bo, 0,
size);
drm_intel_bo_unreference(temp_bo);
}
} else {
drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
}
}
/**
* Called via glGetBufferSubDataARB().
*/
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
GLintptrARB offset,
GLsizeiptrARB size,
GLvoid * data, struct gl_buffer_object *obj)
{
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
struct intel_context *intel = intel_context(ctx);
assert(intel_obj);
if (intel_obj->sys_buffer)
memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
else {
if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
intel_batchbuffer_flush(intel);
}
drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
}
}
/**
* Called via glMapBufferRange and glMapBuffer
*
* The goal of this extension is to allow apps to accumulate their rendering
* at the same time as they accumulate their buffer object. Without it,
* you'd end up blocking on execution of rendering every time you mapped
* the buffer to put new data in.
*
* We support it in 3 ways: If unsynchronized, then don't bother
* flushing the batchbuffer before mapping the buffer, which can save blocking
* in many cases. If we would still block, and they allow the whole buffer
* to be invalidated, then just allocate a new buffer to replace the old one.
* If not, and we'd block, and they allow the subrange of the buffer to be
* invalidated, then we can make a new little BO, let them write into that,
* and blit it into the real BO at unmap time.
*/
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
GLintptr offset, GLsizeiptr length,
GLbitfield access, struct gl_buffer_object *obj,
gl_map_buffer_index index)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
/* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
* internally uses our functions directly.
*/
obj->Mappings[index].Offset = offset;
obj->Mappings[index].Length = length;
obj->Mappings[index].AccessFlags = access;
if (intel_obj->sys_buffer) {
const bool read_only =
(access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;
if (!read_only && intel_obj->source)
release_buffer(intel_obj);
if (!intel_obj->buffer || intel_obj->source) {
obj->Mappings[index].Pointer = intel_obj->sys_buffer + offset;
return obj->Mappings[index].Pointer;
}
align_free(intel_obj->sys_buffer);
intel_obj->sys_buffer = NULL;
}
if (intel_obj->buffer == NULL) {
obj->Mappings[index].Pointer = NULL;
return NULL;
}
/* If the access is synchronized (like a normal buffer mapping), then get
* things flushed out so the later mapping syncs appropriately through GEM.
* If the user doesn't care about existing buffer contents and mapping would
* cause us to block, then throw out the old buffer.
*
* If they set INVALIDATE_BUFFER, we can pitch the current contents to
* achieve the required synchronization.
*/
if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
drm_intel_bo_unreference(intel_obj->buffer);
intel_bufferobj_alloc_buffer(intel, intel_obj);
} else {
perf_debug("Stalling on the GPU for mapping a busy buffer "
"object\n");
intel_flush(ctx);
}
} else if (drm_intel_bo_busy(intel_obj->buffer) &&
(access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
drm_intel_bo_unreference(intel_obj->buffer);
intel_bufferobj_alloc_buffer(intel, intel_obj);
}
}
/* If the user is mapping a range of an active buffer object but
* doesn't require the current contents of that range, make a new
* BO, and we'll copy what they put in there out at unmap or
* FlushRange time.
*/
if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
drm_intel_bo_busy(intel_obj->buffer)) {
/* Ensure that the base alignment of the allocation meets the alignment
* guarantees the driver has advertised to the application.
*/
const unsigned alignment = ctx->Const.MinMapBufferAlignment;
const unsigned extra = (uintptr_t) offset % alignment;
if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
intel_obj->range_map_buffer[index] =
align_malloc(length + extra, alignment);
obj->Mappings[index].Pointer =
intel_obj->range_map_buffer[index] + extra;
} else {
intel_obj->range_map_bo[index] = drm_intel_bo_alloc(intel->bufmgr,
"range map",
length + extra,
alignment);
if (!(access & GL_MAP_READ_BIT)) {
drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
} else {
drm_intel_bo_map(intel_obj->range_map_bo[index],
(access & GL_MAP_WRITE_BIT) != 0);
}
obj->Mappings[index].Pointer =
intel_obj->range_map_bo[index]->virtual + extra;
}
return obj->Mappings[index].Pointer;
}
if (access & GL_MAP_UNSYNCHRONIZED_BIT)
drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
else if (!(access & GL_MAP_READ_BIT)) {
drm_intel_gem_bo_map_gtt(intel_obj->buffer);
} else {
drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
}
obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
return obj->Mappings[index].Pointer;
}
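/* A hedged client-side sketch of the three strategies above (illustrative
 * only, not driver code):
 *
 *    GLbitfield f = GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT;
 *    void *p = glMapBufferRange(GL_ARRAY_BUFFER, 0, 4096, f);
 *        -> no batch flush; the map may alias in-flight rendering
 *
 *    f = GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT;
 *        -> a busy BO is unreferenced and replaced with a fresh one
 *
 *    f = GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT;
 *        -> writes land in range_map_bo/range_map_buffer and are blitted
 *           into the real BO at unmap or FlushRange time
 */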
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
* data, but FlushMappedBufferRange may be followed by further writes to
* the pointer, so we would have to re-map after emitting our blit, which
* would defeat the point.
*/
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
struct gl_buffer_object *obj,
gl_map_buffer_index index)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
drm_intel_bo *temp_bo;
/* Unless we're in the range map using a temporary system buffer,
* there's no work to do.
*/
if (intel_obj->range_map_buffer[index] == NULL)
return;
if (length == 0)
return;
temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);
/* Use obj->Pointer instead of intel_obj->range_map_buffer because the
* former points to the actual mapping while the latter may be offset to
* meet alignment guarantees.
*/
drm_intel_bo_subdata(temp_bo, 0, length, obj->Mappings[index].Pointer);
intel_emit_linear_blit(intel,
intel_obj->buffer,
obj->Mappings[index].Offset + offset,
temp_bo, 0,
length);
drm_intel_bo_unreference(temp_bo);
}
/**
* Called via glUnmapBuffer().
*/
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
gl_map_buffer_index index)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
assert(obj->Mappings[index].Pointer);
if (intel_obj->sys_buffer != NULL) {
/* always keep the mapping around. */
} else if (intel_obj->range_map_buffer[index] != NULL) {
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
intel_batchbuffer_emit_mi_flush(intel);
align_free(intel_obj->range_map_buffer[index]);
intel_obj->range_map_buffer[index] = NULL;
} else if (intel_obj->range_map_bo[index] != NULL) {
const unsigned extra = obj->Mappings[index].Pointer -
intel_obj->range_map_bo[index]->virtual;
drm_intel_bo_unmap(intel_obj->range_map_bo[index]);
intel_emit_linear_blit(intel,
intel_obj->buffer, obj->Mappings[index].Offset,
intel_obj->range_map_bo[index], extra,
obj->Mappings[index].Length);
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
intel_batchbuffer_emit_mi_flush(intel);
drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
intel_obj->range_map_bo[index] = NULL;
} else if (intel_obj->buffer != NULL) {
drm_intel_bo_unmap(intel_obj->buffer);
}
obj->Mappings[index].Pointer = NULL;
obj->Mappings[index].Offset = 0;
obj->Mappings[index].Length = 0;
return true;
}
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
struct intel_buffer_object *intel_obj)
{
if (intel_obj->source)
release_buffer(intel_obj);
if (intel_obj->buffer == NULL) {
intel_bufferobj_alloc_buffer(intel, intel_obj);
drm_intel_bo_subdata(intel_obj->buffer,
0, intel_obj->Base.Size,
intel_obj->sys_buffer);
align_free(intel_obj->sys_buffer);
intel_obj->sys_buffer = NULL;
intel_obj->offset = 0;
}
return intel_obj->buffer;
}
#define INTEL_UPLOAD_SIZE (64*1024)
void
intel_upload_finish(struct intel_context *intel)
{
if (!intel->upload.bo)
return;
if (intel->upload.buffer_len) {
drm_intel_bo_subdata(intel->upload.bo,
intel->upload.buffer_offset,
intel->upload.buffer_len,
intel->upload.buffer);
intel->upload.buffer_len = 0;
}
drm_intel_bo_unreference(intel->upload.bo);
intel->upload.bo = NULL;
}
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
intel_upload_finish(intel);
if (size < INTEL_UPLOAD_SIZE)
size = INTEL_UPLOAD_SIZE;
intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
intel->upload.offset = 0;
}
void intel_upload_data(struct intel_context *intel,
const void *ptr, GLuint size, GLuint align,
drm_intel_bo **return_bo,
GLuint *return_offset)
{
GLuint base, delta;
base = (intel->upload.offset + align - 1) / align * align;
if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
wrap_buffers(intel, size);
base = 0;
}
drm_intel_bo_reference(intel->upload.bo);
*return_bo = intel->upload.bo;
*return_offset = base;
delta = base - intel->upload.offset;
if (intel->upload.buffer_len &&
intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
{
drm_intel_bo_subdata(intel->upload.bo,
intel->upload.buffer_offset,
intel->upload.buffer_len,
intel->upload.buffer);
intel->upload.buffer_len = 0;
}
if (size < sizeof(intel->upload.buffer))
{
if (intel->upload.buffer_len == 0)
intel->upload.buffer_offset = base;
else
intel->upload.buffer_len += delta;
memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
intel->upload.buffer_len += size;
}
else
{
drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
}
intel->upload.offset = base + size;
}
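/* A hedged usage sketch (added for illustration, not from the original
 * file; the helper name is made up): streaming a small piece of data
 * through the shared upload BO. intel_upload_data() takes a reference on
 * the BO it returns, so the caller unreferences it once its relocations
 * are emitted.
 */
static void
example_upload_three_floats(struct intel_context *intel)
{
   static const float rgba[3] = { 1.0f, 0.0f, 0.0f };
   drm_intel_bo *bo;
   GLuint offset;
   /* Copies into the staging buffer (or straight to the BO when larger)
    * and returns where the data will live. */
   intel_upload_data(intel, rgba, sizeof(rgba), 4, &bo, &offset);
   /* ... emit a relocation against bo at offset ... */
   drm_intel_bo_unreference(bo);
}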
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
struct intel_buffer_object *intel_obj,
GLuint align, GLuint *offset)
{
if (intel_obj->buffer == NULL) {
intel_upload_data(intel,
intel_obj->sys_buffer, intel_obj->Base.Size, align,
&intel_obj->buffer, &intel_obj->offset);
intel_obj->source = 1;
}
*offset = intel_obj->offset;
return intel_obj->buffer;
}
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
struct gl_buffer_object *src,
struct gl_buffer_object *dst,
GLintptr read_offset, GLintptr write_offset,
GLsizeiptr size)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_src = intel_buffer_object(src);
struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
drm_intel_bo *src_bo, *dst_bo;
GLuint src_offset;
if (size == 0)
return;
/* If we're in system memory, just map and memcpy. */
if (intel_src->sys_buffer || intel_dst->sys_buffer) {
/* The same buffer may be used, but note that regions copied may
* not overlap.
*/
if (src == dst) {
char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
GL_MAP_READ_BIT |
GL_MAP_WRITE_BIT,
dst, MAP_INTERNAL);
memmove(ptr + write_offset, ptr + read_offset, size);
intel_bufferobj_unmap(ctx, dst, MAP_INTERNAL);
} else {
const char *src_ptr;
char *dst_ptr;
src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
GL_MAP_READ_BIT, src,
MAP_INTERNAL);
dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
GL_MAP_WRITE_BIT, dst,
MAP_INTERNAL);
memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);
intel_bufferobj_unmap(ctx, src, MAP_INTERNAL);
intel_bufferobj_unmap(ctx, dst, MAP_INTERNAL);
}
return;
}
/* Otherwise, we have real BOs, so blit them. */
dst_bo = intel_bufferobj_buffer(intel, intel_dst);
src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);
intel_emit_linear_blit(intel,
dst_bo, write_offset,
src_bo, read_offset + src_offset, size);
/* Since we've emitted some blits to buffers that will (likely) be used
* in rendering operations in other cache domains in this batch, emit a
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
intel_batchbuffer_emit_mi_flush(intel);
}
static GLenum
intel_buffer_purgeable(drm_intel_bo *buffer)
{
int retained = 0;
if (buffer != NULL)
retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);
return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}
static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
struct gl_buffer_object *obj,
GLenum option)
{
struct intel_buffer_object *intel_obj = intel_buffer_object (obj);
if (intel_obj->buffer != NULL)
return intel_buffer_purgeable(intel_obj->buffer);
if (option == GL_RELEASED_APPLE) {
align_free(intel_obj->sys_buffer);
intel_obj->sys_buffer = NULL;
return GL_RELEASED_APPLE;
} else {
/* XXX Create the buffer and madvise(MADV_DONTNEED)? */
struct intel_context *intel = intel_context(ctx);
drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj);
return intel_buffer_purgeable(bo);
}
}
static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
struct gl_texture_object *obj,
GLenum option)
{
struct intel_texture_object *intel;
(void) ctx;
(void) option;
intel = intel_texture_object(obj);
if (intel->mt == NULL || intel->mt->region == NULL)
return GL_RELEASED_APPLE;
return intel_buffer_purgeable(intel->mt->region->bo);
}
static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
struct gl_renderbuffer *obj,
GLenum option)
{
struct intel_renderbuffer *intel;
(void) ctx;
(void) option;
intel = intel_renderbuffer(obj);
if (intel->mt == NULL)
return GL_RELEASED_APPLE;
return intel_buffer_purgeable(intel->mt->region->bo);
}
static GLenum
intel_buffer_unpurgeable(drm_intel_bo *buffer)
{
int retained;
retained = 0;
if (buffer != NULL)
retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);
return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}
static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
struct gl_buffer_object *obj,
GLenum option)
{
(void) ctx;
(void) option;
return intel_buffer_unpurgeable(intel_buffer_object (obj)->buffer);
}
static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
struct gl_texture_object *obj,
GLenum option)
{
struct intel_texture_object *intel;
(void) ctx;
(void) option;
intel = intel_texture_object(obj);
if (intel->mt == NULL || intel->mt->region == NULL)
return GL_UNDEFINED_APPLE;
return intel_buffer_unpurgeable(intel->mt->region->bo);
}
static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
struct gl_renderbuffer *obj,
GLenum option)
{
struct intel_renderbuffer *intel;
(void) ctx;
(void) option;
intel = intel_renderbuffer(obj);
if (intel->mt == NULL)
return GL_UNDEFINED_APPLE;
return intel_buffer_unpurgeable(intel->mt->region->bo);
}
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
functions->NewBufferObject = intel_bufferobj_alloc;
functions->DeleteBuffer = intel_bufferobj_free;
functions->BufferData = intel_bufferobj_data;
functions->BufferSubData = intel_bufferobj_subdata;
functions->GetBufferSubData = intel_bufferobj_get_subdata;
functions->MapBufferRange = intel_bufferobj_map_range;
functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
functions->UnmapBuffer = intel_bufferobj_unmap;
functions->CopyBufferSubData = intel_bufferobj_copy_subdata;
functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
functions->TextureObjectPurgeable = intel_texture_object_purgeable;
functions->RenderObjectPurgeable = intel_render_object_purgeable;
functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
}

View File

@ -1,82 +0,0 @@
/**************************************************************************
*
* Copyright 2005 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_BUFFEROBJ_H
#define INTEL_BUFFEROBJ_H
#include "main/mtypes.h"
struct intel_context;
struct gl_buffer_object;
/**
* Intel vertex/pixel buffer object, derived from Mesa's gl_buffer_object.
*/
struct intel_buffer_object
{
struct gl_buffer_object Base;
drm_intel_bo *buffer; /* the low-level buffer manager's buffer handle */
GLuint offset; /* any offset into that buffer */
/** System memory buffer data, if not using a BO to store the data. */
void *sys_buffer;
drm_intel_bo *range_map_bo[MAP_COUNT];
void *range_map_buffer[MAP_COUNT];
bool source;
};
/* Get the bm buffer associated with a GL bufferobject:
*/
drm_intel_bo *intel_bufferobj_buffer(struct intel_context *intel,
struct intel_buffer_object *obj);
drm_intel_bo *intel_bufferobj_source(struct intel_context *intel,
struct intel_buffer_object *obj,
GLuint align,
GLuint *offset);
void intel_upload_data(struct intel_context *intel,
const void *ptr, GLuint size, GLuint align,
drm_intel_bo **return_bo,
GLuint *return_offset);
void intel_upload_finish(struct intel_context *intel);
/* Hook the bufferobject implementation into mesa:
*/
void intelInitBufferObjectFuncs(struct dd_function_table *functions);
static inline struct intel_buffer_object *
intel_buffer_object(struct gl_buffer_object *obj)
{
return (struct intel_buffer_object *) obj;
}
#endif

View File

@ -1,94 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "intel_context.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
/**
* Check if we're about to draw into the front color buffer.
* If so, set the intel->front_buffer_dirty field to true.
*/
void
intel_check_front_buffer_rendering(struct intel_context *intel)
{
const struct gl_framebuffer *fb = intel->ctx.DrawBuffer;
if (_mesa_is_winsys_fbo(fb)) {
/* drawing to window system buffer */
if (fb->_NumColorDrawBuffers > 0) {
if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
intel->front_buffer_dirty = true;
}
}
}
}
static void
intelDrawBuffer(struct gl_context * ctx)
{
if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer)) {
struct intel_context *const intel = intel_context(ctx);
/* If we might be front-buffer rendering on this buffer for the first
* time, invalidate our DRI drawable so we'll ask for new buffers
* (including the fake front) before we start rendering again.
*/
if (intel->driContext->driDrawablePriv)
dri2InvalidateDrawable(intel->driContext->driDrawablePriv);
}
intel_draw_buffer(ctx);
}
static void
intelReadBuffer(struct gl_context * ctx, GLenum mode)
{
if (_mesa_is_front_buffer_reading(ctx->ReadBuffer)) {
struct intel_context *const intel = intel_context(ctx);
/* If we might be front-buffer reading on this buffer for the first
* time, invalidate our DRI drawable so we'll ask for new buffers
* (including the fake front) before we start reading again.
*/
if (intel->driContext->driReadablePriv)
dri2InvalidateDrawable(intel->driContext->driReadablePriv);
}
}
void
intelInitBufferFuncs(struct dd_function_table *functions)
{
functions->DrawBuffer = intelDrawBuffer;
functions->ReadBuffer = intelReadBuffer;
}

View File

@ -1,51 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_BUFFERS_H
#define INTEL_BUFFERS_H
#include "dri_util.h"
#include "drm-uapi/drm.h"
#include "intel_context.h"
struct intel_context;
extern void intel_check_front_buffer_rendering(struct intel_context *intel);
static inline void
intel_draw_buffer(struct gl_context * ctx)
{
struct intel_context *intel = intel_context(ctx);
intel->vtbl.update_draw_buffer(intel);
}
extern void intelInitBufferFuncs(struct dd_function_table *functions);
void intelCalcViewport(struct gl_context * ctx);
#endif /* INTEL_BUFFERS_H */

View File

@ -1,64 +0,0 @@
/*
* Copyright © 2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#define PCI_CHIP_I810 0x7121
#define PCI_CHIP_I810_DC100 0x7123
#define PCI_CHIP_I810_E 0x7125
#define PCI_CHIP_I815 0x1132
#define PCI_CHIP_I830_M 0x3577
#define PCI_CHIP_845_G 0x2562
#define PCI_CHIP_I855_GM 0x3582
#define PCI_CHIP_I865_G 0x2572
#define PCI_CHIP_I915_G 0x2582
#define PCI_CHIP_E7221_G 0x258A
#define PCI_CHIP_I915_GM 0x2592
#define PCI_CHIP_I945_G 0x2772
#define PCI_CHIP_I945_GM 0x27A2
#define PCI_CHIP_I945_GME 0x27AE
#define PCI_CHIP_Q35_G 0x29B2
#define PCI_CHIP_G33_G 0x29C2
#define PCI_CHIP_Q33_G 0x29D2
#define PCI_CHIP_PNV_GM 0xA011
#define PCI_CHIP_PNV_G 0xA001
#define IS_945(devid) (devid == PCI_CHIP_I945_G || \
devid == PCI_CHIP_I945_GM || \
devid == PCI_CHIP_I945_GME || \
devid == PCI_CHIP_G33_G || \
devid == PCI_CHIP_Q33_G || \
devid == PCI_CHIP_Q35_G || \
devid == PCI_CHIP_PNV_G || \
devid == PCI_CHIP_PNV_GM)
#define IS_GEN3(devid) (devid == PCI_CHIP_I915_G || \
devid == PCI_CHIP_E7221_G || \
devid == PCI_CHIP_I915_GM || \
IS_945(devid))
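/* Hedged usage sketch (added for illustration): callers key Gen3 paths off
 * these macros using the PCI device ID; for example, intelInitContext() in
 * intel_context.c sets intel->is_945 = IS_945(devID). */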

View File

@ -1,192 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* Copyright 2009 Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/glheader.h"
#include "main/mtypes.h"
#include "main/condrender.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"
#include "intel_context.h"
#include "intel_blit.h"
#include "intel_clear.h"
#include "intel_fbo.h"
#include "intel_regions.h"
#define FILE_DEBUG_FLAG DEBUG_BLIT
static const char *buffer_names[] = {
[BUFFER_FRONT_LEFT] = "front",
[BUFFER_BACK_LEFT] = "back",
[BUFFER_FRONT_RIGHT] = "front right",
[BUFFER_BACK_RIGHT] = "back right",
[BUFFER_DEPTH] = "depth",
[BUFFER_STENCIL] = "stencil",
[BUFFER_ACCUM] = "accum",
[BUFFER_COLOR0] = "color0",
[BUFFER_COLOR1] = "color1",
[BUFFER_COLOR2] = "color2",
[BUFFER_COLOR3] = "color3",
[BUFFER_COLOR4] = "color4",
[BUFFER_COLOR5] = "color5",
[BUFFER_COLOR6] = "color6",
[BUFFER_COLOR7] = "color7",
};
static void
debug_mask(const char *name, GLbitfield mask)
{
GLuint i;
if (unlikely(INTEL_DEBUG & DEBUG_BLIT)) {
DBG("%s clear:", name);
for (i = 0; i < BUFFER_COUNT; i++) {
if (mask & (1 << i))
DBG(" %s", buffer_names[i]);
}
DBG("\n");
}
}
/**
* Called by ctx->Driver.Clear.
*/
static void
intelClear(struct gl_context *ctx, GLbitfield mask)
{
struct intel_context *intel = intel_context(ctx);
GLbitfield tri_mask = 0;
GLbitfield blit_mask = 0;
GLbitfield swrast_mask = 0;
struct gl_framebuffer *fb = ctx->DrawBuffer;
struct intel_renderbuffer *irb;
int i;
if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
intel->front_buffer_dirty = true;
}
if (0)
fprintf(stderr, "%s\n", __func__);
/* Get SW clears out of the way: Anything without an intel_renderbuffer */
for (i = 0; i < BUFFER_COUNT; i++) {
if (!(mask & (1 << i)))
continue;
irb = intel_get_renderbuffer(fb, i);
if (unlikely(!irb)) {
swrast_mask |= (1 << i);
mask &= ~(1 << i);
}
}
if (unlikely(swrast_mask)) {
debug_mask("swrast", swrast_mask);
_swrast_Clear(ctx, swrast_mask);
}
/* HW color buffers (front, back, aux, generic FBO, etc) */
if (GET_COLORMASK(ctx->Color.ColorMask, 0) == 0xf) {
/* clear all R,G,B,A */
blit_mask |= (mask & BUFFER_BITS_COLOR);
}
else {
/* glColorMask in effect */
tri_mask |= (mask & BUFFER_BITS_COLOR);
}
/* Make sure we have up to date buffers before we start looking at
* the tiling bits to determine how to clear. */
intel_prepare_render(intel);
/* HW stencil */
if (mask & BUFFER_BIT_STENCIL) {
const struct intel_region *stencilRegion
= intel_get_rb_region(fb, BUFFER_STENCIL);
if (stencilRegion) {
/* have hw stencil */
if (stencilRegion->tiling == I915_TILING_Y ||
(ctx->Stencil.WriteMask[0] & 0xff) != 0xff) {
/* We have to use the 3D engine if we're clearing a partial mask
* of the stencil buffer, or if we're on a 965 which has a tiled
* depth/stencil buffer in a layout we can't blit to.
*/
tri_mask |= BUFFER_BIT_STENCIL;
}
else {
/* clearing all stencil bits, use blitting */
blit_mask |= BUFFER_BIT_STENCIL;
}
}
}
/* HW depth */
if (mask & BUFFER_BIT_DEPTH) {
const struct intel_region *irb = intel_get_rb_region(fb, BUFFER_DEPTH);
/* clear depth with whatever method is used for stencil (see above) */
if (irb->tiling == I915_TILING_Y || tri_mask & BUFFER_BIT_STENCIL)
tri_mask |= BUFFER_BIT_DEPTH;
else
blit_mask |= BUFFER_BIT_DEPTH;
}
/* If we're doing a tri pass for depth/stencil, include a likely color
* buffer with it.
*/
if (mask & (BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL)) {
int color_bit = ffs(mask & BUFFER_BITS_COLOR);
if (color_bit != 0) {
tri_mask |= blit_mask & (1 << (color_bit - 1));
blit_mask &= ~(1 << (color_bit - 1));
}
}
/* Anything left, just use tris */
tri_mask |= mask & ~blit_mask;
if (blit_mask) {
debug_mask("blit", blit_mask);
tri_mask |= intelClearWithBlit(ctx, blit_mask);
}
if (tri_mask) {
debug_mask("tri", tri_mask);
if (!ctx->Extensions.ARB_fragment_shader)
_mesa_meta_Clear(&intel->ctx, tri_mask);
else
_mesa_meta_glsl_Clear(&intel->ctx, tri_mask);
}
}
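/* Worked example (illustrative): clearing color + depth + stencil with a
 * full color mask on a Y-tiled depth/stencil buffer. The tiling checks
 * above push BUFFER_BIT_STENCIL and BUFFER_BIT_DEPTH into tri_mask, and
 * the color-buffer pass then moves the first color buffer from blit_mask
 * into tri_mask, so a single tri pass clears all three together.
 */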
void
intelInitClearFuncs(struct dd_function_table *functions)
{
functions->Clear = intelClear;
}

View File

@ -1,38 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_CLEAR_H
#define INTEL_CLEAR_H
struct dd_function_table;
extern void
intelInitClearFuncs(struct dd_function_table *functions);
#endif /* INTEL_CLEAR_H */

View File

@ -1,898 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/glheader.h"
#include "main/context.h"
#include "main/extensions.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/points.h"
#include "main/renderbuffer.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "intel_chipset.h"
#include "intel_buffers.h"
#include "intel_tex.h"
#include "intel_batchbuffer.h"
#include "intel_clear.h"
#include "intel_extensions.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"
#include "intel_bufmgr.h"
#include "intel_screen.h"
#include "intel_mipmap_tree.h"
#include "utils.h"
#include "util/debug.h"
#include "util/ralloc.h"
#include "util/u_memory.h"
int INTEL_DEBUG = (0);
const char *const i915_vendor_string = "Intel Open Source Technology Center";
const char *
i915_get_renderer_string(unsigned deviceID)
{
const char *chipset;
static char buffer[128];
switch (deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i830_pci_ids.h"
#include "pci_ids/i915_pci_ids.h"
default:
chipset = "Unknown Intel Chipset";
break;
}
(void) driGetRendererString(buffer, chipset, 0);
return buffer;
}
static const GLubyte *
intelGetString(struct gl_context * ctx, GLenum name)
{
const struct intel_context *const intel = intel_context(ctx);
switch (name) {
case GL_VENDOR:
return (GLubyte *) i915_vendor_string;
case GL_RENDERER:
return
(GLubyte *) i915_get_renderer_string(intel->intelScreen->deviceID);
default:
return NULL;
}
}
#define flushFront(screen) ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
static void
intel_flush_front(struct gl_context *ctx)
{
struct intel_context *intel = intel_context(ctx);
__DRIcontext *driContext = intel->driContext;
__DRIdrawable *driDrawable = driContext->driDrawablePriv;
__DRIscreen *const screen = intel->intelScreen->driScrnPriv;
if (intel->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
if (flushFront(screen) &&
driDrawable &&
driDrawable->loaderPrivate) {
flushFront(screen)(driDrawable, driDrawable->loaderPrivate);
/* We set the dirty bit in intel_prepare_render() if we're
* front buffer rendering once we get there.
*/
intel->front_buffer_dirty = false;
}
}
}
static void
intel_update_image_buffers(struct intel_context *intel, __DRIdrawable *drawable);
static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}
static void
intel_query_dri2_buffers(struct intel_context *intel,
__DRIdrawable *drawable,
__DRIbuffer **buffers,
int *count);
static void
intel_process_dri2_buffer(struct intel_context *intel,
__DRIdrawable *drawable,
__DRIbuffer *buffer,
struct intel_renderbuffer *rb,
const char *buffer_name);
static void
intel_update_dri2_buffers(struct intel_context *intel, __DRIdrawable *drawable)
{
__DRIbuffer *buffers = NULL;
int i, count;
const char *region_name;
struct intel_renderbuffer *rb;
struct gl_framebuffer *fb = drawable->driverPrivate;
intel_query_dri2_buffers(intel, drawable, &buffers, &count);
if (buffers == NULL)
return;
for (i = 0; i < count; i++) {
switch (buffers[i].attachment) {
case __DRI_BUFFER_FRONT_LEFT:
rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
region_name = "dri2 front buffer";
break;
case __DRI_BUFFER_FAKE_FRONT_LEFT:
rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
region_name = "dri2 fake front buffer";
break;
case __DRI_BUFFER_BACK_LEFT:
rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
region_name = "dri2 back buffer";
break;
case __DRI_BUFFER_DEPTH:
case __DRI_BUFFER_HIZ:
case __DRI_BUFFER_DEPTH_STENCIL:
case __DRI_BUFFER_STENCIL:
case __DRI_BUFFER_ACCUM:
default:
fprintf(stderr,
"unhandled buffer attach event, attachment type %d\n",
buffers[i].attachment);
return;
}
intel_process_dri2_buffer(intel, drawable, &buffers[i], rb, region_name);
}
}
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
struct intel_context *intel = context->driverPrivate;
__DRIscreen *screen = intel->intelScreen->driScrnPriv;
/* Set this up front, so that in case our buffers get invalidated
* while we're getting new buffers, we don't clobber the stamp and
* thus ignore the invalidate. */
drawable->lastStamp = drawable->dri2.stamp;
if (unlikely(INTEL_DEBUG & DEBUG_DRI))
fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
if (screen->image.loader)
intel_update_image_buffers(intel, drawable);
else
intel_update_dri2_buffers(intel, drawable);
driUpdateFramebufferSize(&intel->ctx, drawable);
}
/**
* intel_prepare_render should be called anywhere that current read/drawbuffer
* state is required.
*/
void
intel_prepare_render(struct intel_context *intel)
{
__DRIcontext *driContext = intel->driContext;
__DRIdrawable *drawable;
drawable = driContext->driDrawablePriv;
if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
if (drawable->lastStamp != drawable->dri2.stamp)
intel_update_renderbuffers(driContext, drawable);
intel_draw_buffer(&intel->ctx);
driContext->dri2.draw_stamp = drawable->dri2.stamp;
}
drawable = driContext->driReadablePriv;
if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
if (drawable->lastStamp != drawable->dri2.stamp)
intel_update_renderbuffers(driContext, drawable);
driContext->dri2.read_stamp = drawable->dri2.stamp;
}
/* If we're currently rendering to the front buffer, the rendering
* that will happen next will probably dirty the front buffer. So
* mark it as dirty here.
*/
if (_mesa_is_front_buffer_drawing(intel->ctx.DrawBuffer))
intel->front_buffer_dirty = true;
/* Wait for the swapbuffers before the one we just emitted, so we
* don't get too many swaps outstanding for apps that are GPU-heavy
* but not CPU-heavy.
*
* We're using intelDRI2Flush (called from the loader before
* swapbuffer) and glFlush (for front buffer rendering) as the
* indicator that a frame is done and then throttle when we get
* here as we prepare to render the next frame. At this point the
* round trips for swap/copy and getting new buffers are done and
* we'll spend less time waiting on the GPU.
*
* Unfortunately, we don't have a handle to the batch containing
* the swap, and getting our hands on that doesn't seem worth it,
* so we just use the first batch we emitted after the last swap.
*/
if (intel->need_throttle && intel->first_post_swapbuffers_batch) {
if (!intel->disable_throttling)
drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
intel->first_post_swapbuffers_batch = NULL;
intel->need_throttle = false;
}
}
static void
intel_noninvalidate_viewport(struct gl_context *ctx)
{
struct intel_context *intel = intel_context(ctx);
__DRIcontext *driContext = intel->driContext;
intelCalcViewport(ctx);
if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
dri2InvalidateDrawable(driContext->driDrawablePriv);
dri2InvalidateDrawable(driContext->driReadablePriv);
}
}
static void
intel_viewport(struct gl_context *ctx)
{
intelCalcViewport(ctx);
}
static const struct debug_control debug_control[] = {
{ "tex", DEBUG_TEXTURE},
{ "state", DEBUG_STATE},
{ "blit", DEBUG_BLIT},
{ "mip", DEBUG_MIPTREE},
{ "fall", DEBUG_PERF},
{ "perf", DEBUG_PERF},
{ "bat", DEBUG_BATCH},
{ "pix", DEBUG_PIXEL},
{ "buf", DEBUG_BUFMGR},
{ "reg", DEBUG_REGION},
{ "fbo", DEBUG_FBO},
{ "fs", DEBUG_WM },
{ "sync", DEBUG_SYNC},
{ "dri", DEBUG_DRI },
{ "stats", DEBUG_STATS },
{ "wm", DEBUG_WM },
{ "aub", DEBUG_AUB },
{ NULL, 0 }
};
static void
intelInvalidateState(struct gl_context * ctx)
{
GLuint new_state = ctx->NewState;
struct intel_context *intel = intel_context(ctx);
if (ctx->swrast_context)
_swrast_InvalidateState(ctx, new_state);
intel->NewGLState |= new_state;
if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
_mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
if (intel->vtbl.invalidate_state)
intel->vtbl.invalidate_state( intel, new_state );
}
void
intel_flush_rendering_to_batch(struct gl_context *ctx)
{
struct intel_context *intel = intel_context(ctx);
if (intel->Fallback)
_swrast_flush(ctx);
INTEL_FIREVERTICES(intel);
}
void
_intel_flush(struct gl_context *ctx, const char *file, int line)
{
struct intel_context *intel = intel_context(ctx);
intel_flush_rendering_to_batch(ctx);
if (intel->batch.used)
_intel_batchbuffer_flush(intel, file, line);
}
static void
intel_glFlush(struct gl_context *ctx, unsigned gallium_flush_flags)
{
struct intel_context *intel = intel_context(ctx);
intel_flush(ctx);
intel_flush_front(ctx);
if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
intel->need_throttle = true;
}
void
intelFinish(struct gl_context * ctx)
{
struct intel_context *intel = intel_context(ctx);
intel_flush(ctx);
intel_flush_front(ctx);
if (intel->batch.last_bo)
drm_intel_bo_wait_rendering(intel->batch.last_bo);
}
void
intelInitDriverFunctions(struct dd_function_table *functions)
{
_mesa_init_driver_functions(functions);
_tnl_init_driver_draw_function(functions);
functions->Flush = intel_glFlush;
functions->Finish = intelFinish;
functions->GetString = intelGetString;
functions->UpdateState = intelInvalidateState;
intelInitTextureFuncs(functions);
intelInitTextureImageFuncs(functions);
intelInitTextureSubImageFuncs(functions);
intelInitTextureCopyImageFuncs(functions);
intelInitClearFuncs(functions);
intelInitBufferFuncs(functions);
intelInitPixelFuncs(functions);
intelInitBufferObjectFuncs(functions);
intel_init_syncobj_functions(functions);
}
bool
intelInitContext(struct intel_context *intel,
int api,
unsigned major_version,
unsigned minor_version,
uint32_t flags,
const struct gl_config * mesaVis,
__DRIcontext * driContextPriv,
void *sharedContextPrivate,
struct dd_function_table *functions,
unsigned *dri_ctx_error)
{
struct gl_context *ctx = &intel->ctx;
struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
__DRIscreen *sPriv = driContextPriv->driScreenPriv;
struct intel_screen *intelScreen = sPriv->driverPrivate;
int bo_reuse_mode;
/* Can't rely on invalidate events, fall back to glViewport hack */
if (!driContextPriv->driScreenPriv->dri2.useInvalidate)
functions->Viewport = intel_noninvalidate_viewport;
else
functions->Viewport = intel_viewport;
intel->intelScreen = intelScreen;
if (!_mesa_initialize_context(&intel->ctx, api, mesaVis, shareCtx,
functions)) {
*dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
printf("%s: failed to init mesa context\n", __func__);
return false;
}
driContextSetFlags(&intel->ctx, flags);
driContextPriv->driverPrivate = intel;
intel->driContext = driContextPriv;
intel->gen = intelScreen->gen;
const int devID = intelScreen->deviceID;
intel->is_945 = IS_945(devID);
memset(&ctx->TextureFormatSupported,
0, sizeof(ctx->TextureFormatSupported));
driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
sPriv->myNum, "i915", NULL, NULL, NULL, 0, NULL, 0);
intel->maxBatchSize = 4096;
/* Estimate the size of the mappable aperture into the GTT. There's an
* ioctl to get the whole GTT size, but not one to get the mappable subset.
* It turns out it's basically always 256MB, though some ancient hardware
* was smaller.
*/
uint32_t gtt_size = 256 * 1024 * 1024;
if (intel->gen == 2)
gtt_size = 128 * 1024 * 1024;
/* We don't want to map two objects such that a memcpy between them would
* just fault one mapping in and then the other over and over forever. So
* we would need to divide the GTT size by 2. Additionally, some GTT is
* taken up by things like the framebuffer and the ringbuffer and such, so
* be more conservative.
*/
intel->max_gtt_map_object_size = gtt_size / 4;
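/* Worked example: the default 256 MiB aperture caps single mappings at
 * 64 MiB; Gen2's 128 MiB aperture caps them at 32 MiB. */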
intel->bufmgr = intelScreen->bufmgr;
bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
switch (bo_reuse_mode) {
case DRI_CONF_BO_REUSE_DISABLED:
break;
case DRI_CONF_BO_REUSE_ALL:
intel_bufmgr_gem_enable_reuse(intel->bufmgr);
break;
}
ctx->Const.MinLineWidth = 1.0;
ctx->Const.MinLineWidthAA = 1.0;
ctx->Const.MaxLineWidth = 7.0;
ctx->Const.MaxLineWidthAA = 7.0;
ctx->Const.LineWidthGranularity = 0.5;
ctx->Const.MinPointSize = 1.0;
ctx->Const.MinPointSizeAA = 1.0;
ctx->Const.MaxPointSize = 255.0;
ctx->Const.MaxPointSizeAA = 3.0;
ctx->Const.PointSizeGranularity = 1.0;
ctx->Const.StripTextureBorder = GL_TRUE;
/* reinitialize the context point state.
* It depends on constants in __struct gl_contextRec::Const
*/
_mesa_init_point(ctx);
ctx->Const.MaxRenderbufferSize = 2048;
_swrast_CreateContext(ctx);
_vbo_CreateContext(ctx, false);
if (ctx->swrast_context) {
_tnl_CreateContext(ctx);
_swsetup_CreateContext(ctx);
/* Configure swrast to match hardware characteristics: */
_swrast_allow_pixel_fog(ctx, false);
_swrast_allow_vertex_fog(ctx, true);
}
_mesa_meta_init(ctx);
intel->hw_stipple = 1;
intel->RenderIndex = ~0;
intelInitExtensions(ctx);
INTEL_DEBUG = parse_debug_string(getenv("INTEL_DEBUG"), debug_control);
if (INTEL_DEBUG & DEBUG_BUFMGR)
dri_bufmgr_set_debug(intel->bufmgr, true);
if (INTEL_DEBUG & DEBUG_PERF)
intel->perf_debug = true;
if (INTEL_DEBUG & DEBUG_AUB)
drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);
intel_batchbuffer_init(intel);
intel_fbo_init(intel);
intel->prim.primitive = ~0;
/* Force all software fallbacks */
if (getenv("INTEL_NO_RAST")) {
fprintf(stderr, "disabling 3D rasterization\n");
intel->no_rast = 1;
}
if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
intel->always_flush_batch = 1;
}
if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
fprintf(stderr, "flushing GPU caches before/after each draw call\n");
intel->always_flush_cache = 1;
}
if (driQueryOptionb(&intel->optionCache, "disable_throttling")) {
fprintf(stderr, "disabling flush throttling\n");
intel->disable_throttling = 1;
}
return true;
}
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
struct intel_context *intel =
(struct intel_context *) driContextPriv->driverPrivate;
struct gl_context *ctx = &intel->ctx;
assert(intel); /* should never be null */
if (intel) {
INTEL_FIREVERTICES(intel);
/* Dump a final BMP in case the application doesn't call SwapBuffers */
if (INTEL_DEBUG & DEBUG_AUB) {
intel_batchbuffer_flush(intel);
aub_dump_bmp(&intel->ctx);
}
_mesa_meta_free(&intel->ctx);
intel->vtbl.destroy(intel);
if (ctx->swrast_context) {
_swsetup_DestroyContext(&intel->ctx);
_tnl_DestroyContext(&intel->ctx);
}
_vbo_DestroyContext(&intel->ctx);
if (ctx->swrast_context)
_swrast_DestroyContext(&intel->ctx);
intel->Fallback = 0x0; /* don't call _swrast_Flush later */
intel_batchbuffer_free(intel);
free(intel->prim.vb);
intel->prim.vb = NULL;
drm_intel_bo_unreference(intel->prim.vb_bo);
intel->prim.vb_bo = NULL;
drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
intel->first_post_swapbuffers_batch = NULL;
driDestroyOptionCache(&intel->optionCache);
/* free the Mesa context */
_mesa_free_context_data(&intel->ctx, true);
align_free(intel);
driContextPriv->driverPrivate = NULL;
}
}
GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
/* Unset current context and dispatch table */
_mesa_make_current(NULL, NULL, NULL);
return true;
}
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
__DRIdrawable * driDrawPriv,
__DRIdrawable * driReadPriv)
{
struct intel_context *intel;
if (driContextPriv)
intel = (struct intel_context *) driContextPriv->driverPrivate;
else
intel = NULL;
if (driContextPriv) {
struct gl_context *ctx = &intel->ctx;
struct gl_framebuffer *fb, *readFb;
if (driDrawPriv == NULL && driReadPriv == NULL) {
fb = _mesa_get_incomplete_framebuffer();
readFb = _mesa_get_incomplete_framebuffer();
} else {
fb = driDrawPriv->driverPrivate;
readFb = driReadPriv->driverPrivate;
driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
}
intel_prepare_render(intel);
_mesa_make_current(ctx, fb, readFb);
/* We do this in intel_prepare_render() too, but intel->ctx.DrawBuffer
* is NULL at that point. We can't call _mesa_make_current()
* first, since we need the buffer size for the initial
* viewport. So just call intel_draw_buffer() again here. */
intel_draw_buffer(ctx);
}
else {
_mesa_make_current(NULL, NULL, NULL);
}
return true;
}
/**
* \brief Query DRI2 to obtain a DRIdrawable's buffers.
*
* To determine which DRI buffers to request, examine the renderbuffers
* attached to the drawable's framebuffer. Then request the buffers with
* DRI2GetBuffers() or DRI2GetBuffersWithFormat().
*
* This is called from intel_update_renderbuffers().
*
* \param drawable Drawable whose buffers are queried.
* \param buffers [out] List of buffers returned by DRI2 query.
* \param buffer_count [out] Number of buffers returned.
*
* \see intel_update_renderbuffers()
* \see DRI2GetBuffers()
* \see DRI2GetBuffersWithFormat()
*/
static void
intel_query_dri2_buffers(struct intel_context *intel,
__DRIdrawable *drawable,
__DRIbuffer **buffers,
int *buffer_count)
{
__DRIscreen *screen = intel->intelScreen->driScrnPriv;
struct gl_framebuffer *fb = drawable->driverPrivate;
int i = 0;
unsigned attachments[__DRI_BUFFER_COUNT];
struct intel_renderbuffer *front_rb;
struct intel_renderbuffer *back_rb;
front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
memset(attachments, 0, sizeof(attachments));
if ((_mesa_is_front_buffer_drawing(fb) ||
_mesa_is_front_buffer_reading(fb) ||
!back_rb) && front_rb) {
/* If a fake front buffer is in use, then querying for
* __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
* the real front buffer to the fake front buffer. So before doing the
* query, we need to make sure all the pending drawing has landed in the
* real front buffer.
*/
intel_flush(&intel->ctx);
intel_flush_front(&intel->ctx);
attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
attachments[i++] = intel_bits_per_pixel(front_rb);
} else if (front_rb && intel->front_buffer_dirty) {
/* We have pending front buffer rendering, but we aren't querying for a
* front buffer. If the front buffer we have is a fake front buffer,
* the X server is going to throw it away when it processes the query.
* So before doing the query, make sure all the pending drawing has
* landed in the real front buffer.
*/
intel_flush(&intel->ctx);
intel_flush_front(&intel->ctx);
}
if (back_rb) {
attachments[i++] = __DRI_BUFFER_BACK_LEFT;
attachments[i++] = intel_bits_per_pixel(back_rb);
}
assert(i <= ARRAY_SIZE(attachments));
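/* Illustrative note (not in the original source): `attachments` holds
* (attachment token, bits-per-pixel) pairs, so the `i / 2` passed below is
* the number of buffers actually being requested.
*/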
*buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
&drawable->w,
&drawable->h,
attachments, i / 2,
buffer_count,
drawable->loaderPrivate);
}
/**
* \brief Assign a DRI buffer's DRM region to a renderbuffer.
*
* This is called from intel_update_renderbuffers().
*
* \par Note:
* DRI buffers whose attachment point is DRI2BufferStencil or
* DRI2BufferDepthStencil are handled as special cases.
*
* \param buffer_name is a human-readable name, such as "dri2 front buffer",
* that is passed to intel_region_alloc_for_handle().
*
* \see intel_update_renderbuffers()
* \see intel_region_alloc_for_handle()
*/
static void
intel_process_dri2_buffer(struct intel_context *intel,
__DRIdrawable *drawable,
__DRIbuffer *buffer,
struct intel_renderbuffer *rb,
const char *buffer_name)
{
struct intel_region *region = NULL;
if (!rb)
return;
/* We try to avoid closing and reopening the same BO name, because the first
* use of a mapping of the buffer involves a bunch of page faulting which is
* moderately expensive.
*/
if (rb->mt &&
rb->mt->region &&
rb->mt->region->name == buffer->name)
return;
if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
fprintf(stderr,
"attaching buffer %d, at %d, cpp %d, pitch %d\n",
buffer->name, buffer->attachment,
buffer->cpp, buffer->pitch);
}
intel_miptree_release(&rb->mt);
region = intel_region_alloc_for_handle(intel->intelScreen,
buffer->cpp,
drawable->w,
drawable->h,
buffer->pitch,
buffer->name,
buffer_name);
if (!region)
return;
rb->mt = intel_miptree_create_for_dri2_buffer(intel,
buffer->attachment,
intel_rb_format(rb),
region);
intel_region_release(&region);
}
/**
* \brief Query the DRI image loader to obtain a DRIdrawable's buffers.
*
* To determine which DRI buffers to request, examine the renderbuffers
* attached to the drawable's framebuffer. Then request the buffers from
* the DRI image loader (DRI3).
*
* This is called from intel_update_renderbuffers().
*
* \param drawable Drawable whose buffers are queried.
* \param buffers [out] List of buffers returned by the image loader.
* \param buffer_count [out] Number of buffers returned.
*
* \see intel_update_renderbuffers()
*/
static void
intel_update_image_buffer(struct intel_context *intel,
__DRIdrawable *drawable,
struct intel_renderbuffer *rb,
__DRIimage *buffer,
enum __DRIimageBufferMask buffer_type)
{
struct intel_region *region = buffer->region;
if (!rb || !region)
return;
unsigned num_samples = rb->Base.Base.NumSamples;
if (rb->mt &&
rb->mt->region &&
rb->mt->region == region)
return;
intel_miptree_release(&rb->mt);
rb->mt = intel_miptree_create_for_image_buffer(intel,
buffer_type,
intel_rb_format(rb),
num_samples,
region);
}
static void
intel_update_image_buffers(struct intel_context *intel, __DRIdrawable *drawable)
{
struct gl_framebuffer *fb = drawable->driverPrivate;
__DRIscreen *screen = intel->intelScreen->driScrnPriv;
struct intel_renderbuffer *front_rb;
struct intel_renderbuffer *back_rb;
struct __DRIimageList images;
unsigned int format;
uint32_t buffer_mask = 0;
int ret;
front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
if (back_rb)
format = intel_rb_format(back_rb);
else if (front_rb)
format = intel_rb_format(front_rb);
else
return;
if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
_mesa_is_front_buffer_reading(fb) || !back_rb)) {
buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
}
if (back_rb)
buffer_mask |= __DRI_IMAGE_BUFFER_BACK;
ret = screen->image.loader->getBuffers(drawable,
driGLFormatToImageFormat(format),
&drawable->dri2.stamp,
drawable->loaderPrivate,
buffer_mask,
&images);
if (!ret)
return;
if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
drawable->w = images.front->width;
drawable->h = images.front->height;
intel_update_image_buffer(intel,
drawable,
front_rb,
images.front,
__DRI_IMAGE_BUFFER_FRONT);
}
if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
drawable->w = images.back->width;
drawable->h = images.back->height;
intel_update_image_buffer(intel,
drawable,
back_rb,
images.back,
__DRI_IMAGE_BUFFER_BACK);
}
}

View File

@ -1,445 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTELCONTEXT_INC
#define INTELCONTEXT_INC
#include <stdbool.h>
#include <string.h>
#include "main/mtypes.h"
#include "main/errors.h"
#include "drm-uapi/drm.h"
#include <intel_bufmgr.h>
#include "drm-uapi/i915_drm.h"
#include "intel_screen.h"
#include "intel_tex_obj.h"
#include "tnl/t_vertex.h"
#define TAG(x) intel##x
#include "tnl_dd/t_dd_vertex.h"
#undef TAG
#define DV_PF_555 (1<<8)
#define DV_PF_565 (2<<8)
#define DV_PF_8888 (3<<8)
#define DV_PF_4444 (8<<8)
#define DV_PF_1555 (9<<8)
struct intel_region;
struct intel_context;
typedef void (*intel_tri_func) (struct intel_context *, intelVertex *,
intelVertex *, intelVertex *);
typedef void (*intel_line_func) (struct intel_context *, intelVertex *,
intelVertex *);
typedef void (*intel_point_func) (struct intel_context *, intelVertex *);
/**
* Bits for intel->Fallback field
*/
/*@{*/
#define INTEL_FALLBACK_DRAW_BUFFER 0x1
#define INTEL_FALLBACK_READ_BUFFER 0x2
#define INTEL_FALLBACK_DEPTH_BUFFER 0x4
#define INTEL_FALLBACK_STENCIL_BUFFER 0x8
#define INTEL_FALLBACK_USER 0x10
#define INTEL_FALLBACK_RENDERMODE 0x20
#define INTEL_FALLBACK_TEXTURE 0x40
#define INTEL_FALLBACK_DRIVER 0x1000 /**< first for drivers */
/*@}*/
extern void intelFallback(struct intel_context *intel, GLbitfield bit,
bool mode);
#define FALLBACK( intel, bit, mode ) intelFallback( intel, bit, mode )
#define INTEL_WRITE_PART 0x1
#define INTEL_WRITE_FULL 0x2
#define INTEL_READ 0x4
#ifndef likely
#ifdef __GNUC__
#define likely(expr) (__builtin_expect(expr, 1))
#define unlikely(expr) (__builtin_expect(expr, 0))
#else
#define likely(expr) (expr)
#define unlikely(expr) (expr)
#endif
#endif
struct intel_batchbuffer {
/** Current batchbuffer being queued up. */
drm_intel_bo *bo;
/** Last BO submitted to the hardware. Used for glFinish(). */
drm_intel_bo *last_bo;
uint16_t emit, total;
uint16_t used, reserved_space;
uint32_t *map;
uint32_t *cpu_map;
#define BATCH_SZ (8192*sizeof(uint32_t))
};
/**
* intel_context is derived from Mesa's context class: struct gl_context.
*/
struct intel_context
{
struct gl_context ctx; /**< base class, must be first field */
struct
{
void (*destroy) (struct intel_context * intel);
void (*emit_state) (struct intel_context * intel);
void (*finish_batch) (struct intel_context * intel);
void (*new_batch) (struct intel_context * intel);
void (*emit_invarient_state) (struct intel_context * intel);
void (*update_texture_state) (struct intel_context * intel);
void (*render_start) (struct intel_context * intel);
void (*render_prevalidate) (struct intel_context * intel);
void (*set_draw_region) (struct intel_context * intel,
struct intel_region * draw_regions[],
struct intel_region * depth_region,
GLuint num_regions);
void (*update_draw_buffer)(struct intel_context *intel);
void (*reduced_primitive_state) (struct intel_context * intel,
GLenum rprim);
bool (*check_vertex_size) (struct intel_context * intel,
GLuint expected);
void (*invalidate_state) (struct intel_context *intel,
GLuint new_state);
void (*assert_not_dirty) (struct intel_context *intel);
void (*debug_batch)(struct intel_context *intel);
void (*annotate_aub)(struct intel_context *intel);
bool (*render_target_supported)(struct intel_context *intel,
struct gl_renderbuffer *rb);
} vtbl;
GLbitfield Fallback; /**< mask of INTEL_FALLBACK_x bits */
GLuint NewGLState;
dri_bufmgr *bufmgr;
unsigned int maxBatchSize;
/**
* Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965.
*/
int gen;
bool is_945;
struct intel_batchbuffer batch;
drm_intel_bo *first_post_swapbuffers_batch;
bool need_throttle;
bool no_batch_wrap;
bool tnl_pipeline_running; /**< Set while i915 runs _tnl_run_pipeline. */
/**
* Set if we're either a debug context or the INTEL_DEBUG=perf environment
* variable is set, this is the flag indicating to do expensive work that
* might lead to a perf_debug() call.
*/
bool perf_debug;
struct
{
GLuint id;
uint32_t start_ptr; /**< for i8xx */
uint32_t primitive; /**< Current hardware primitive type */
void (*flush) (struct intel_context *);
drm_intel_bo *vb_bo;
uint8_t *vb;
unsigned int start_offset; /**< Byte offset of primitive sequence */
unsigned int current_offset; /**< Byte offset of next vertex */
unsigned int count; /**< Number of vertices in current primitive */
} prim;
struct {
drm_intel_bo *bo;
GLuint offset;
uint32_t buffer_len;
uint32_t buffer_offset;
char buffer[4096];
} upload;
uint32_t max_gtt_map_object_size;
/* Offsets of fields within the current vertex:
*/
GLuint coloroffset;
GLuint specoffset;
GLuint wpos_offset;
struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
GLuint vertex_attr_count;
GLfloat polygon_offset_scale; /* dependent on depth_scale, bpp */
bool hw_stipple;
bool no_rast;
bool always_flush_batch;
bool always_flush_cache;
bool disable_throttling;
/* State for intelvb.c and inteltris.c.
*/
GLuint RenderIndex;
GLmatrix ViewportMatrix;
GLenum render_primitive;
GLenum reduced_primitive; /*< Only gen < 6 */
GLuint vertex_size;
GLubyte *verts; /* points to tnl->clipspace.vertex_buf */
/* Fallback rasterization functions
*/
intel_point_func draw_point;
intel_line_func draw_line;
intel_tri_func draw_tri;
/**
* Set if rendering has occurred to the drawable's front buffer.
*
* This is used in the DRI2 case to detect that glFlush should also copy
* the contents of the fake front buffer to the real front buffer.
*/
bool front_buffer_dirty;
__DRIcontext *driContext;
struct intel_screen *intelScreen;
/**
* Configuration cache
*/
driOptionCache optionCache;
};
extern char *__progname;
#define SUBPIXEL_X 0.125
#define SUBPIXEL_Y 0.125
#define INTEL_FIREVERTICES(intel) \
do { \
if ((intel)->prim.flush) \
(intel)->prim.flush(intel); \
} while (0)
/* ================================================================
* Debugging:
*/
extern int INTEL_DEBUG;
#define DEBUG_TEXTURE 0x1
#define DEBUG_STATE 0x2
#define DEBUG_BLIT 0x8
#define DEBUG_MIPTREE 0x10
#define DEBUG_PERF 0x20
#define DEBUG_BATCH 0x80
#define DEBUG_PIXEL 0x100
#define DEBUG_BUFMGR 0x200
#define DEBUG_REGION 0x400
#define DEBUG_FBO 0x800
#define DEBUG_SYNC 0x2000
#define DEBUG_DRI 0x10000
#define DEBUG_STATS 0x100000
#define DEBUG_WM 0x400000
#define DEBUG_AUB 0x4000000
#ifdef HAVE_ANDROID_PLATFORM
#define LOG_TAG "INTEL-MESA"
#if ANDROID_API_LEVEL >= 26
#include <log/log.h>
#else
#include <cutils/log.h>
#endif /* log/log.h is used starting with Android 8 (API level 26) */
#ifndef ALOGW
#define ALOGW LOGW
#endif
#define dbg_printf(...) ALOGW(__VA_ARGS__)
#else
#define dbg_printf(...) printf(__VA_ARGS__)
#endif /* HAVE_ANDROID_PLATFORM */
#define DBG(...) do { \
if (unlikely(INTEL_DEBUG & FILE_DEBUG_FLAG)) \
dbg_printf(__VA_ARGS__); \
} while(0)
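/* A sketch of typical DBG() usage (illustrative, not part of the original
* header): each .c file defines FILE_DEBUG_FLAG to one of the flags above
* before using DBG(), as intel_fbo.c does:
*
* #define FILE_DEBUG_FLAG DEBUG_FBO
* ...
* DBG("%s: rb %d (%s)\n", __func__, rb->Name,
* _mesa_get_format_name(rb->Format));
*/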
#define perf_debug(...) do { \
static GLuint msg_id = 0; \
if (unlikely(INTEL_DEBUG & DEBUG_PERF)) \
dbg_printf(__VA_ARGS__); \
if (intel->perf_debug) \
_mesa_gl_debugf(&intel->ctx, &msg_id, \
MESA_DEBUG_SOURCE_API, \
MESA_DEBUG_TYPE_PERFORMANCE, \
MESA_DEBUG_SEVERITY_MEDIUM, \
__VA_ARGS__); \
} while(0)
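/* Illustrative usage (not in the original header); the macro expects an
* `intel` pointer in scope, as in intel_fbo.c:
*
* perf_debug("glBlitFramebuffer(): non-1:1 blit. "
* "Falling back to software rendering.\n");
*/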
#define WARN_ONCE(cond, fmt...) do { \
if (unlikely(cond)) { \
static bool _warned = false; \
static GLuint msg_id = 0; \
if (!_warned) { \
fprintf(stderr, "WARNING: "); \
fprintf(stderr, fmt); \
_warned = true; \
\
_mesa_gl_debugf(ctx, &msg_id, \
MESA_DEBUG_SOURCE_API, \
MESA_DEBUG_TYPE_OTHER, \
MESA_DEBUG_SEVERITY_HIGH, fmt); \
} \
} \
} while (0)
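/* Illustrative usage (not in the original header); WARN_ONCE() expects a
* `ctx` pointer in scope and warns only once per call site. The condition
* and message here are hypothetical:
*
* WARN_ONCE(pitch > 32768, "pitch %d exceeds blitter limit\n", pitch);
*/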
/* ================================================================
* intel_context.c:
*/
extern const char *const i915_vendor_string;
extern const char *i915_get_renderer_string(unsigned deviceID);
extern bool intelInitContext(struct intel_context *intel,
int api,
unsigned major_version,
unsigned minor_version,
uint32_t flags,
const struct gl_config * mesaVis,
__DRIcontext * driContextPriv,
void *sharedContextPrivate,
struct dd_function_table *functions,
unsigned *dri_ctx_error);
extern void intelFinish(struct gl_context * ctx);
extern void intel_flush_rendering_to_batch(struct gl_context *ctx);
extern void _intel_flush(struct gl_context * ctx, const char *file, int line);
#define intel_flush(ctx) _intel_flush(ctx, __FILE__, __LINE__)
extern void intelInitDriverFunctions(struct dd_function_table *functions);
void intel_init_syncobj_functions(struct dd_function_table *functions);
/* ================================================================
* intel_state.c:
*/
#define COMPAREFUNC_ALWAYS 0
#define COMPAREFUNC_NEVER 0x1
#define COMPAREFUNC_LESS 0x2
#define COMPAREFUNC_EQUAL 0x3
#define COMPAREFUNC_LEQUAL 0x4
#define COMPAREFUNC_GREATER 0x5
#define COMPAREFUNC_NOTEQUAL 0x6
#define COMPAREFUNC_GEQUAL 0x7
#define STENCILOP_KEEP 0
#define STENCILOP_ZERO 0x1
#define STENCILOP_REPLACE 0x2
#define STENCILOP_INCRSAT 0x3
#define STENCILOP_DECRSAT 0x4
#define STENCILOP_INCR 0x5
#define STENCILOP_DECR 0x6
#define STENCILOP_INVERT 0x7
#define LOGICOP_CLEAR 0
#define LOGICOP_NOR 0x1
#define LOGICOP_AND_INV 0x2
#define LOGICOP_COPY_INV 0x3
#define LOGICOP_AND_RVRSE 0x4
#define LOGICOP_INV 0x5
#define LOGICOP_XOR 0x6
#define LOGICOP_NAND 0x7
#define LOGICOP_AND 0x8
#define LOGICOP_EQUIV 0x9
#define LOGICOP_NOOP 0xa
#define LOGICOP_OR_INV 0xb
#define LOGICOP_COPY 0xc
#define LOGICOP_OR_RVRSE 0xd
#define LOGICOP_OR 0xe
#define LOGICOP_SET 0xf
#define BLENDFACT_ZERO 0x01
#define BLENDFACT_ONE 0x02
#define BLENDFACT_SRC_COLR 0x03
#define BLENDFACT_INV_SRC_COLR 0x04
#define BLENDFACT_SRC_ALPHA 0x05
#define BLENDFACT_INV_SRC_ALPHA 0x06
#define BLENDFACT_DST_ALPHA 0x07
#define BLENDFACT_INV_DST_ALPHA 0x08
#define BLENDFACT_DST_COLR 0x09
#define BLENDFACT_INV_DST_COLR 0x0a
#define BLENDFACT_SRC_ALPHA_SATURATE 0x0b
#define BLENDFACT_CONST_COLOR 0x0c
#define BLENDFACT_INV_CONST_COLOR 0x0d
#define BLENDFACT_CONST_ALPHA 0x0e
#define BLENDFACT_INV_CONST_ALPHA 0x0f
#define BLENDFACT_MASK 0x0f
enum {
DRI_CONF_BO_REUSE_DISABLED,
DRI_CONF_BO_REUSE_ALL
};
extern int intel_translate_shadow_compare_func(GLenum func);
extern int intel_translate_compare_func(GLenum func);
extern int intel_translate_stencil_op(GLenum op);
extern int intel_translate_blend_factor(GLenum factor);
void intel_update_renderbuffers(__DRIcontext *context,
__DRIdrawable *drawable);
void intel_prepare_render(struct intel_context *intel);
void i915_set_buf_info_for_region(uint32_t *state, struct intel_region *region,
uint32_t buffer_id);
void intel_init_texture_formats(struct gl_context *ctx);
/*======================================================================
* Inline conversion functions.
* These are better-typed than the macros used previously:
*/
static inline struct intel_context *
intel_context(struct gl_context * ctx)
{
return (struct intel_context *) ctx;
}
#endif

View File

@ -1,104 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/version.h"
#include "intel_chipset.h"
#include "intel_context.h"
#include "intel_extensions.h"
#include "intel_reg.h"
#include "utils.h"
/**
* Initializes potential list of extensions if ctx == NULL, or actually enables
* extensions for a context.
*/
void
intelInitExtensions(struct gl_context *ctx)
{
struct intel_context *intel = intel_context(ctx);
assert(intel->gen == 2 || intel->gen == 3);
ctx->Extensions.ARB_draw_elements_base_vertex = true;
ctx->Extensions.ARB_explicit_attrib_location = true;
ctx->Extensions.ARB_explicit_uniform_location = true;
ctx->Extensions.ARB_framebuffer_object = true;
ctx->Extensions.ARB_internalformat_query = true;
ctx->Extensions.ARB_map_buffer_range = true;
ctx->Extensions.ARB_point_sprite = true;
ctx->Extensions.ARB_sync = true;
ctx->Extensions.ARB_texture_border_clamp = true;
ctx->Extensions.ARB_texture_cube_map = true;
ctx->Extensions.ARB_texture_env_combine = true;
ctx->Extensions.ARB_texture_env_crossbar = true;
ctx->Extensions.ARB_texture_env_dot3 = true;
ctx->Extensions.ARB_vertex_program = true;
ctx->Extensions.ARB_vertex_shader = true;
ctx->Extensions.EXT_blend_color = true;
ctx->Extensions.EXT_blend_equation_separate = true;
ctx->Extensions.EXT_blend_func_separate = true;
ctx->Extensions.EXT_blend_minmax = true;
ctx->Extensions.EXT_gpu_program_parameters = true;
ctx->Extensions.EXT_pixel_buffer_object = true;
ctx->Extensions.EXT_point_parameters = true;
ctx->Extensions.EXT_provoking_vertex = true;
ctx->Extensions.EXT_texture_env_dot3 = true;
ctx->Extensions.EXT_texture_filter_anisotropic = true;
ctx->Extensions.APPLE_object_purgeable = true;
ctx->Extensions.MESA_ycbcr_texture = true;
ctx->Extensions.NV_texture_rectangle = true;
ctx->Extensions.TDFX_texture_compression_FXT1 = true;
ctx->Extensions.OES_EGL_image = true;
ctx->Extensions.OES_draw_texture = true;
ctx->Const.GLSLVersion = 120;
ctx->Const.GLSLVersionCompat = 120;
_mesa_override_glsl_version(&ctx->Const);
if (intel->gen >= 3) {
ctx->Extensions.ARB_ES2_compatibility = true;
ctx->Extensions.ARB_depth_texture = true;
ctx->Extensions.ARB_fragment_program = true;
ctx->Extensions.ARB_shadow = true;
ctx->Extensions.ARB_texture_non_power_of_two = true;
ctx->Extensions.EXT_texture_sRGB = true;
ctx->Extensions.EXT_texture_sRGB_decode = true;
ctx->Extensions.EXT_stencil_two_side = true;
ctx->Extensions.ATI_texture_env_combine3 = true;
ctx->Extensions.NV_texture_env_combine4 = true;
if (driQueryOptionb(&intel->optionCache, "fragment_shader"))
ctx->Extensions.ARB_fragment_shader = true;
if (driQueryOptionb(&intel->optionCache, "stub_occlusion_query"))
ctx->Extensions.ARB_occlusion_query = true;
}
ctx->Extensions.EXT_texture_compression_s3tc = true;
ctx->Extensions.ANGLE_texture_compression_dxt = true;
}

View File

@ -1,42 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_EXTENSIONS_H
#define INTEL_EXTENSIONS_H
extern void
intelInitExtensions(struct gl_context *ctx);
extern void
intelInitExtensionsES1(struct gl_context *ctx);
extern void
intelInitExtensionsES2(struct gl_context *ctx);
#endif

View File

@ -1,776 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/enums.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"
#include "util/u_memory.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"
#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"
#define FILE_DEBUG_FLAG DEBUG_FBO
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name);
struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
if (irb && irb->mt)
return irb->mt->region;
else
return NULL;
}
/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
assert(irb);
intel_miptree_release(&irb->mt);
_mesa_delete_renderbuffer(ctx, rb);
}
/**
* \see dd_function_table::MapRenderbuffer
*/
static void
intel_map_renderbuffer(struct gl_context *ctx,
struct gl_renderbuffer *rb,
GLuint x, GLuint y, GLuint w, GLuint h,
GLbitfield mode,
GLubyte **out_map,
GLint *out_stride,
bool flip_y)
{
struct intel_context *intel = intel_context(ctx);
struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
void *map;
int stride;
/* driver does not support GL_FRAMEBUFFER_FLIP_Y_MESA */
assert((rb->Name == 0) == flip_y);
if (srb->Buffer) {
/* this is a malloc'd renderbuffer (accum buffer), not an irb */
GLint bpp = _mesa_get_format_bytes(rb->Format);
GLint rowStride = srb->RowStride;
*out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
*out_stride = rowStride;
return;
}
intel_prepare_render(intel);
/* For a window-system renderbuffer, we need to flip the mapping we receive
* upside-down. So we ask for the rectangle flipped vertically, and then
* return a pointer to its bottom row with a negative stride.
*/
if (rb->Name == 0) {
y = rb->Height - y - h;
}
intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
x, y, w, h, mode, &map, &stride);
if (rb->Name == 0) {
map += (h - 1) * stride;
stride = -stride;
}
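/* Illustrative note (not in the original source): for a window-system
* buffer of height H, a request for rows [y, y+h) was remapped above to
* rows [H-y-h, H-y); returning a pointer to the last mapped row with a
* negated stride presents the map in GL's bottom-up row order.
*/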
DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
__func__, rb->Name, _mesa_get_format_name(rb->Format),
x, y, w, h, map, stride);
*out_map = map;
*out_stride = stride;
}
/**
* \see dd_function_table::UnmapRenderbuffer
*/
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
struct gl_renderbuffer *rb)
{
struct intel_context *intel = intel_context(ctx);
struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
DBG("%s: rb %d (%s)\n", __func__,
rb->Name, _mesa_get_format_name(rb->Format));
if (srb->Buffer) {
/* this is a malloc'd renderbuffer (accum buffer) */
/* nothing to do */
return;
}
intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
}
static mesa_format
intel_renderbuffer_format(struct gl_context * ctx, GLenum internalFormat)
{
struct intel_context *intel = intel_context(ctx);
switch (internalFormat) {
default:
/* Use the same format-choice logic as for textures.
* Renderbuffers aren't any different from textures for us,
* except they're less useful because you can't texture with
* them.
*/
return intel->ctx.Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
internalFormat,
GL_NONE, GL_NONE);
case GL_DEPTH_COMPONENT16:
return MESA_FORMAT_Z_UNORM16;
case GL_DEPTH_COMPONENT:
case GL_DEPTH_COMPONENT24:
case GL_DEPTH_COMPONENT32:
return MESA_FORMAT_Z24_UNORM_X8_UINT;
case GL_DEPTH_STENCIL_EXT:
case GL_DEPTH24_STENCIL8_EXT:
case GL_STENCIL_INDEX:
case GL_STENCIL_INDEX1_EXT:
case GL_STENCIL_INDEX4_EXT:
case GL_STENCIL_INDEX8_EXT:
case GL_STENCIL_INDEX16_EXT:
/* These aren't actual texture formats, so force them here. */
return MESA_FORMAT_Z24_UNORM_S8_UINT;
}
}
static GLboolean
intel_alloc_private_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
GLenum internalFormat,
GLuint width, GLuint height)
{
struct intel_context *intel = intel_context(ctx);
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
assert(rb->Format != MESA_FORMAT_NONE);
rb->Width = width;
rb->Height = height;
rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
intel_miptree_release(&irb->mt);
DBG("%s: %s: %s (%dx%d)\n", __func__,
_mesa_enum_to_string(internalFormat),
_mesa_get_format_name(rb->Format), width, height);
if (width == 0 || height == 0)
return true;
irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
width, height);
if (!irb->mt)
return false;
return true;
}
/**
* Called via glRenderbufferStorageEXT() to set the format and allocate
* storage for a user-created renderbuffer.
*/
static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
GLenum internalFormat,
GLuint width, GLuint height)
{
rb->Format = intel_renderbuffer_format(ctx, internalFormat);
return intel_alloc_private_renderbuffer_storage(ctx, rb, internalFormat, width, height);
}
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
struct gl_renderbuffer *rb,
void *image_handle)
{
struct intel_context *intel = intel_context(ctx);
struct intel_renderbuffer *irb;
__DRIscreen *screen;
__DRIimage *image;
screen = intel->intelScreen->driScrnPriv;
image = screen->dri2.image->lookupEGLImage(screen, image_handle,
screen->loaderPrivate);
if (image == NULL)
return;
/* __DRIimage is opaque to the core so it has to be checked here */
switch (image->format) {
case MESA_FORMAT_R8G8B8A8_UNORM:
_mesa_error(&intel->ctx, GL_INVALID_OPERATION,
"glEGLImageTargetRenderbufferStorage(unsupported image format");
return;
break;
default:
break;
}
irb = intel_renderbuffer(rb);
intel_miptree_release(&irb->mt);
irb->mt = intel_miptree_create_for_bo(intel,
image->region->bo,
image->format,
image->offset,
image->region->width,
image->region->height,
image->region->pitch,
image->region->tiling);
if (!irb->mt)
return;
rb->InternalFormat = image->internal_format;
rb->Width = image->region->width;
rb->Height = image->region->height;
rb->Format = image->format;
rb->_BaseFormat = _mesa_get_format_base_format(image->format);
rb->NeedsFinishRenderTexture = true;
}
/**
* Called by _mesa_resize_framebuffer() for each hardware renderbuffer when a
* window system framebuffer is resized.
*
* Any actual buffer reallocations for hardware renderbuffers (which would
* have triggered _mesa_resize_framebuffer()) were done by
* intel_process_dri2_buffer().
*/
static GLboolean
intel_alloc_window_storage(UNUSED struct gl_context *ctx, struct gl_renderbuffer *rb,
GLenum internalFormat, GLuint width, GLuint height)
{
assert(rb->Name == 0);
rb->Width = width;
rb->Height = height;
rb->InternalFormat = internalFormat;
return true;
}
/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(UNUSED struct gl_context *ctx,
UNUSED struct gl_renderbuffer *rb,
UNUSED GLenum internalFormat,
UNUSED GLuint width, UNUSED GLuint height)
{
_mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
return false;
}
/**
* Create a new intel_renderbuffer which corresponds to an on-screen window,
* not a user-created renderbuffer.
*/
struct intel_renderbuffer *
intel_create_renderbuffer(mesa_format format)
{
struct intel_renderbuffer *irb;
struct gl_renderbuffer *rb;
GET_CURRENT_CONTEXT(ctx);
irb = CALLOC_STRUCT(intel_renderbuffer);
if (!irb) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
return NULL;
}
rb = &irb->Base.Base;
_mesa_init_renderbuffer(rb, 0);
rb->ClassID = INTEL_RB_CLASS;
rb->_BaseFormat = _mesa_get_format_base_format(format);
rb->Format = format;
rb->InternalFormat = rb->_BaseFormat;
/* intel-specific methods */
rb->Delete = intel_delete_renderbuffer;
rb->AllocStorage = intel_alloc_window_storage;
return irb;
}
/**
* Private window-system buffers (as opposed to ones shared with the display
* server created with intel_create_renderbuffer()) are most similar in their
* handling to user-created renderbuffers, but they have a resize handler that
* may be called at intel_update_renderbuffers() time.
*/
struct intel_renderbuffer *
intel_create_private_renderbuffer(mesa_format format)
{
struct intel_renderbuffer *irb;
irb = intel_create_renderbuffer(format);
if (irb)
irb->Base.Base.AllocStorage = intel_alloc_private_renderbuffer_storage;
return irb;
}
/**
* Create a new renderbuffer object.
* Typically called via glBindRenderbufferEXT().
*/
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
/*struct intel_context *intel = intel_context(ctx); */
struct intel_renderbuffer *irb;
struct gl_renderbuffer *rb;
irb = CALLOC_STRUCT(intel_renderbuffer);
if (!irb) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
return NULL;
}
rb = &irb->Base.Base;
_mesa_init_renderbuffer(rb, name);
rb->ClassID = INTEL_RB_CLASS;
/* intel-specific methods */
rb->Delete = intel_delete_renderbuffer;
rb->AllocStorage = intel_alloc_renderbuffer_storage;
/* span routines set in alloc_storage function */
return rb;
}
/**
* Called via glBindFramebufferEXT().
*/
static void
intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
UNUSED struct gl_framebuffer *fb,
UNUSED struct gl_framebuffer *fbread)
{
if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
intel_draw_buffer(ctx);
}
else {
/* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
}
}
/**
* Called via glFramebufferRenderbufferEXT().
*/
static void
intel_framebuffer_renderbuffer(struct gl_context * ctx,
struct gl_framebuffer *fb,
GLenum attachment, struct gl_renderbuffer *rb)
{
DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
_mesa_FramebufferRenderbuffer_sw(ctx, fb, attachment, rb);
intel_draw_buffer(ctx);
}
static bool
intel_renderbuffer_update_wrapper(struct intel_renderbuffer *irb,
struct gl_texture_image *image,
uint32_t layer)
{
struct gl_renderbuffer *rb = &irb->Base.Base;
struct intel_texture_image *intel_image = intel_texture_image(image);
struct intel_mipmap_tree *mt = intel_image->mt;
int level = image->Level;
rb->AllocStorage = intel_nop_alloc_storage;
intel_miptree_check_level_layer(mt, level, layer);
irb->mt_level = level;
irb->mt_layer = layer;
intel_miptree_reference(&irb->mt, mt);
intel_renderbuffer_set_draw_offset(irb);
return true;
}
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
unsigned int dst_x, dst_y;
/* compute offset of the particular 2D image within the texture region */
intel_miptree_get_image_offset(irb->mt,
irb->mt_level,
irb->mt_layer,
&dst_x, &dst_y);
irb->draw_x = dst_x;
irb->draw_y = dst_y;
}
/**
* Called by glFramebufferTexture[123]DEXT() (and other places) to
* prepare for rendering into texture memory. This might be called
* many times to choose different texture levels, cube faces, etc
* before intel_finish_render_texture() is ever called.
*/
static void
intel_render_texture(struct gl_context * ctx,
struct gl_framebuffer *fb,
struct gl_renderbuffer_attachment *att)
{
struct gl_renderbuffer *rb = att->Renderbuffer;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
struct gl_texture_image *image = rb->TexImage;
struct intel_texture_image *intel_image = intel_texture_image(image);
struct intel_mipmap_tree *mt = intel_image->mt;
int layer;
(void) fb;
if (att->CubeMapFace > 0) {
assert(att->Zoffset == 0);
layer = att->CubeMapFace;
} else {
layer = att->Zoffset;
}
if (!intel_image->mt) {
/* Fall back to software when drawing to a texture that doesn't have a
* miptree (has a border, width/height 0, etc.)
*/
_swrast_render_texture(ctx, fb, att);
return;
}
intel_miptree_check_level_layer(mt, att->TextureLevel, layer);
if (!intel_renderbuffer_update_wrapper(irb, image, layer)) {
_swrast_render_texture(ctx, fb, att);
return;
}
DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
_mesa_get_format_name(image->TexFormat),
att->Texture->Name, image->Width, image->Height, image->Depth,
rb->RefCount);
/* update drawing region, etc */
intel_draw_buffer(ctx);
}
/**
* Called by Mesa when rendering to a texture is done.
*/
static void
intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
{
struct intel_context *intel = intel_context(ctx);
DBG("Finish render %s texture\n", _mesa_get_format_name(rb->Format));
/* Since we've (probably) rendered to the texture and will (likely) use
* it in the texture domain later on in this batchbuffer, flush the
* batch. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer like GEM does in the kernel.
*/
intel_batchbuffer_emit_mi_flush(intel);
}
#define fbo_incomplete(fb, ...) do { \
static GLuint msg_id = 0; \
if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) { \
_mesa_gl_debugf(ctx, &msg_id, \
MESA_DEBUG_SOURCE_API, \
MESA_DEBUG_TYPE_OTHER, \
MESA_DEBUG_SEVERITY_MEDIUM, \
__VA_ARGS__); \
} \
DBG(__VA_ARGS__); \
fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED; \
} while (0)
/**
* Do additional "completeness" testing of a framebuffer object.
*/
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
struct intel_context *intel = intel_context(ctx);
struct intel_renderbuffer *depthRb =
intel_get_renderbuffer(fb, BUFFER_DEPTH);
struct intel_renderbuffer *stencilRb =
intel_get_renderbuffer(fb, BUFFER_STENCIL);
struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
int i;
DBG("%s() on fb %p (%s)\n", __func__,
fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
(fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));
if (depthRb)
depth_mt = depthRb->mt;
if (stencilRb)
stencil_mt = stencilRb->mt;
if (depth_mt && stencil_mt) {
/* Make sure that the depth and stencil buffers are actually the same
* slice of the same miptree, since we only support packed
* depth/stencil.
*/
if (depth_mt == stencil_mt) {
if (depthRb->mt_level != stencilRb->mt_level ||
depthRb->mt_layer != stencilRb->mt_layer) {
fbo_incomplete(fb,
"FBO incomplete: depth image level/layer %d/%d != "
"stencil image %d/%d\n",
depthRb->mt_level,
depthRb->mt_layer,
stencilRb->mt_level,
stencilRb->mt_layer);
}
} else {
fbo_incomplete(fb, "FBO incomplete: separate stencil unsupported\n");
}
}
for (i = 0; i < ARRAY_SIZE(fb->Attachment); i++) {
struct gl_renderbuffer *rb;
struct intel_renderbuffer *irb;
if (fb->Attachment[i].Type == GL_NONE)
continue;
/* A supported attachment will have a Renderbuffer set either
* from being a Renderbuffer or being a texture that got the
* intel_wrap_texture() treatment.
*/
rb = fb->Attachment[i].Renderbuffer;
if (rb == NULL) {
fbo_incomplete(fb, "FBO incomplete: attachment without "
"renderbuffer\n");
continue;
}
if (fb->Attachment[i].Type == GL_TEXTURE) {
if (rb->TexImage->Border) {
fbo_incomplete(fb, "FBO incomplete: texture with border\n");
continue;
}
}
irb = intel_renderbuffer(rb);
if (irb == NULL) {
fbo_incomplete(fb, "FBO incomplete: software rendering "
"renderbuffer\n");
continue;
}
if (!intel->vtbl.render_target_supported(intel, rb)) {
fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
"texture/renderbuffer format attached: %s\n",
_mesa_get_format_name(intel_rb_format(irb)));
}
}
}
/**
* Try to do a glBlitFramebuffer using the hardware blit engine.
* We can do this when there is no scaling, mirroring or scissoring, and the
* source and destination formats match.
*
* \return new buffer mask indicating the buffers left to blit using the
* normal path.
*/
static GLbitfield
intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
const struct gl_framebuffer *readFb,
const struct gl_framebuffer *drawFb,
GLint srcX0, GLint srcY0,
GLint srcX1, GLint srcY1,
GLint dstX0, GLint dstY0,
GLint dstX1, GLint dstY1,
GLbitfield mask)
{
struct intel_context *intel = intel_context(ctx);
/* Sync up the state of window system buffers. We need to do this before
* we go looking for the buffers.
*/
intel_prepare_render(intel);
if (mask & GL_COLOR_BUFFER_BIT) {
GLint i;
struct gl_renderbuffer *src_rb = readFb->_ColorReadBuffer;
struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);
if (!src_irb) {
perf_debug("glBlitFramebuffer(): missing src renderbuffer. "
"Falling back to software rendering.\n");
return mask;
}
/* If the source and destination are the same size with no mirroring,
* the rectangles are within the size of the texture and there is no
* scissor, then we can probably use the blit engine.
*/
if (!(srcX0 - srcX1 == dstX0 - dstX1 &&
srcY0 - srcY1 == dstY0 - dstY1 &&
srcX1 >= srcX0 &&
srcY1 >= srcY0 &&
srcX0 >= 0 && srcX1 <= readFb->Width &&
srcY0 >= 0 && srcY1 <= readFb->Height &&
dstX0 >= 0 && dstX1 <= drawFb->Width &&
dstY0 >= 0 && dstY1 <= drawFb->Height &&
!ctx->Scissor.EnableFlags)) {
perf_debug("glBlitFramebuffer(): non-1:1 blit. "
"Falling back to software rendering.\n");
return mask;
}
/* Blit to all active draw buffers. We don't do any pre-checking,
* because we assume that copying to MRTs is rare, and failure midway
* through copying is even more rare. Even if it was to occur, it's
* safe to let meta start the copy over from scratch, because
* glBlitFramebuffer completely overwrites the destination pixels, and
* results are undefined if any destination pixels have a dependency on
* source pixels.
*/
for (i = 0; i < drawFb->_NumColorDrawBuffers; i++) {
struct gl_renderbuffer *dst_rb = drawFb->_ColorDrawBuffers[i];
struct intel_renderbuffer *dst_irb = intel_renderbuffer(dst_rb);
if (!dst_irb) {
perf_debug("glBlitFramebuffer(): missing dst renderbuffer. "
"Falling back to software rendering.\n");
return mask;
}
mesa_format src_format = _mesa_get_srgb_format_linear(src_rb->Format);
mesa_format dst_format = _mesa_get_srgb_format_linear(dst_rb->Format);
if (src_format != dst_format) {
perf_debug("glBlitFramebuffer(): unsupported blit from %s to %s. "
"Falling back to software rendering.\n",
_mesa_get_format_name(src_format),
_mesa_get_format_name(dst_format));
return mask;
}
if (!intel_miptree_blit(intel,
src_irb->mt,
src_irb->mt_level, src_irb->mt_layer,
srcX0, srcY0, src_rb->Name == 0,
dst_irb->mt,
dst_irb->mt_level, dst_irb->mt_layer,
dstX0, dstY0, dst_rb->Name == 0,
dstX1 - dstX0, dstY1 - dstY0, COLOR_LOGICOP_COPY)) {
perf_debug("glBlitFramebuffer(): unknown blit failure. "
"Falling back to software rendering.\n");
return mask;
}
}
mask &= ~GL_COLOR_BUFFER_BIT;
}
return mask;
}
static void
intel_blit_framebuffer(struct gl_context *ctx,
struct gl_framebuffer *readFb,
struct gl_framebuffer *drawFb,
GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
GLbitfield mask, GLenum filter)
{
/* Try using the BLT engine. */
mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb,
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
mask);
if (mask == 0x0)
return;
_mesa_meta_and_swrast_BlitFramebuffer(ctx, readFb, drawFb,
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
mask, filter);
}
/**
* Do one-time context initializations related to GL_EXT_framebuffer_object.
* Hook in device driver functions.
*/
void
intel_fbo_init(struct intel_context *intel)
{
intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
intel->ctx.Driver.RenderTexture = intel_render_texture;
intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
intel_image_target_renderbuffer_storage;
}

View File

@ -1,157 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_FBO_H
#define INTEL_FBO_H
#include <stdbool.h>
#include <assert.h>
#include "main/formats.h"
#include "main/macros.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_screen.h"
struct intel_context;
struct intel_mipmap_tree;
struct intel_texture_image;
/**
* Intel renderbuffer, derived from gl_renderbuffer.
*/
struct intel_renderbuffer
{
struct swrast_renderbuffer Base;
struct intel_mipmap_tree *mt; /**< The renderbuffer storage. */
/**
* \name Miptree view
* \{
*
* Multiple renderbuffers may simultaneously wrap a single texture and each
* provide a different view into that texture. The fields below indicate
* which miptree slice is wrapped by this renderbuffer. The fields' values
* are consistent with the 'level' and 'layer' parameters of
* glFramebufferTextureLayer().
*
* For renderbuffers not created with glFramebufferTexture*(), mt_level and
* mt_layer are 0.
*/
unsigned int mt_level;
unsigned int mt_layer;
/** \} */
GLuint draw_x, draw_y; /**< Offset of drawing within the region */
};
/**
* gl_renderbuffer is a base class which we subclass. The Class field
* is used for simple run-time type checking.
*/
#define INTEL_RB_CLASS 0x12345678
/**
* Return a gl_renderbuffer ptr casted to intel_renderbuffer.
* NULL will be returned if the rb isn't really an intel_renderbuffer.
* This is determined by checking the ClassID.
*/
static inline struct intel_renderbuffer *
intel_renderbuffer(struct gl_renderbuffer *rb)
{
struct intel_renderbuffer *irb = (struct intel_renderbuffer *) rb;
if (irb && irb->Base.Base.ClassID == INTEL_RB_CLASS)
return irb;
else
return NULL;
}
/**
* \brief Return the framebuffer attachment specified by attIndex.
*
* If the framebuffer lacks the specified attachment, then return null.
*
* If the attached renderbuffer is a wrapper, then return wrapped
* renderbuffer.
*/
static inline struct intel_renderbuffer *
intel_get_renderbuffer(struct gl_framebuffer *fb, gl_buffer_index attIndex)
{
struct gl_renderbuffer *rb;
assert((unsigned)attIndex < ARRAY_SIZE(fb->Attachment));
rb = fb->Attachment[attIndex].Renderbuffer;
if (!rb)
return NULL;
return intel_renderbuffer(rb);
}
static inline mesa_format
intel_rb_format(const struct intel_renderbuffer *rb)
{
return rb->Base.Base.Format;
}
extern struct intel_renderbuffer *
intel_create_renderbuffer(mesa_format format);
struct intel_renderbuffer *
intel_create_private_renderbuffer(mesa_format format);
struct gl_renderbuffer*
intel_create_wrapped_renderbuffer(struct gl_context * ctx,
int width, int height,
mesa_format format);
extern void
intel_fbo_init(struct intel_context *intel);
extern void
intel_flip_renderbuffers(struct gl_framebuffer *fb);
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb);
static inline uint32_t
intel_renderbuffer_get_tile_offsets(struct intel_renderbuffer *irb,
uint32_t *tile_x,
uint32_t *tile_y)
{
return intel_miptree_get_tile_offsets(irb->mt, irb->mt_level, irb->mt_layer,
tile_x, tile_y);
}
struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex);
#endif /* INTEL_FBO_H */

View File

@ -1,910 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <GL/gl.h>
#include <GL/internal/dri_interface.h>
#include "intel_batchbuffer.h"
#include "intel_chipset.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"
#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/teximage.h"
#define FILE_DEBUG_FLAG DEBUG_MIPTREE
static GLenum
target_to_target(GLenum target)
{
switch (target) {
case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
return GL_TEXTURE_CUBE_MAP_ARB;
default:
return target;
}
}
struct intel_mipmap_tree *
intel_miptree_create_layout(struct intel_context *intel,
GLenum target,
mesa_format format,
GLuint first_level,
GLuint last_level,
GLuint width0,
GLuint height0,
GLuint depth0)
{
struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
if (!mt)
return NULL;
DBG("%s target %s format %s level %d..%d <-- %p\n", __func__,
_mesa_enum_to_string(target),
_mesa_get_format_name(format),
first_level, last_level, mt);
mt->target = target_to_target(target);
mt->format = format;
mt->first_level = first_level;
mt->last_level = last_level;
/* The cpp is bytes per (1, blockheight)-sized block for compressed
* textures. This is why you'll see divides by blockheight all over
*/
unsigned bw, bh;
_mesa_get_format_block_size(format, &bw, &bh);
assert(_mesa_get_format_bytes(mt->format) % bw == 0);
mt->cpp = _mesa_get_format_bytes(mt->format) / bw;
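/* Worked example (illustrative, not in the original source): DXT1 blocks
* are 4x4 pixels at 8 bytes each, so cpp = 8 / 4 = 2, i.e. 2 bytes per
* (1, 4)-sized block.
*/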
mt->compressed = _mesa_is_format_compressed(format);
mt->refcount = 1;
if (target == GL_TEXTURE_CUBE_MAP) {
assert(depth0 == 1);
depth0 = 6;
}
mt->physical_width0 = width0;
mt->physical_height0 = height0;
mt->physical_depth0 = depth0;
intel_get_texture_alignment_unit(intel, mt->format,
&mt->align_w, &mt->align_h);
if (intel->is_945)
i945_miptree_layout(mt);
else
i915_miptree_layout(mt);
return mt;
}
/**
* \brief Helper function for intel_miptree_create().
*/
static uint32_t
intel_miptree_choose_tiling(struct intel_context *intel,
mesa_format format,
uint32_t width0,
enum intel_miptree_tiling_mode requested,
struct intel_mipmap_tree *mt)
{
/* Some usages may want only one type of tiling, like depth miptrees (Y
* tiled), or temporary BOs for uploading data once (linear).
*/
switch (requested) {
case INTEL_MIPTREE_TILING_ANY:
break;
case INTEL_MIPTREE_TILING_Y:
return I915_TILING_Y;
case INTEL_MIPTREE_TILING_NONE:
return I915_TILING_NONE;
}
int minimum_pitch = mt->total_width * mt->cpp;
/* If the width is much smaller than a tile, don't bother tiling. */
if (minimum_pitch < 64)
return I915_TILING_NONE;
if (ALIGN(minimum_pitch, 512) >= 32768) {
perf_debug("%dx%d miptree too large to blit, falling back to untiled",
mt->total_width, mt->total_height);
return I915_TILING_NONE;
}
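/* Worked example (illustrative, not in the original source): an RGBA8
* miptree with total_width 8192 has minimum_pitch 8192 * 4 = 32768, which
* trips the check above and falls back to untiled.
*/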
/* We don't have BLORP to handle Y-tiled blits, so use X-tiling. */
return I915_TILING_X;
}
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
GLenum target,
mesa_format format,
GLuint first_level,
GLuint last_level,
GLuint width0,
GLuint height0,
GLuint depth0,
bool expect_accelerated_upload,
enum intel_miptree_tiling_mode requested_tiling)
{
struct intel_mipmap_tree *mt;
GLuint total_width, total_height;
mt = intel_miptree_create_layout(intel, target, format,
first_level, last_level, width0,
height0, depth0);
/* pitch == 0 || height == 0 indicates the null texture */
if (!mt || !mt->total_width || !mt->total_height) {
intel_miptree_release(&mt);
return NULL;
}
total_width = mt->total_width;
total_height = mt->total_height;
uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
requested_tiling,
mt);
bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);
mt->region = intel_region_alloc(intel->intelScreen,
y_or_x ? I915_TILING_Y : tiling,
mt->cpp,
total_width,
total_height,
expect_accelerated_upload);
/* If the region is too large to fit in the aperture, we need to use the
* BLT engine to support it. The BLT paths can't currently handle Y-tiling,
* so we need to fall back to X.
*/
if (y_or_x && mt->region->bo->size >= intel->max_gtt_map_object_size) {
perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
mt->total_width, mt->total_height);
intel_region_release(&mt->region);
mt->region = intel_region_alloc(intel->intelScreen,
I915_TILING_X,
mt->cpp,
total_width,
total_height,
expect_accelerated_upload);
}
mt->offset = 0;
if (!mt->region) {
intel_miptree_release(&mt);
return NULL;
}
return mt;
}
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct intel_context *intel,
drm_intel_bo *bo,
mesa_format format,
uint32_t offset,
uint32_t width,
uint32_t height,
int pitch,
uint32_t tiling)
{
struct intel_mipmap_tree *mt;
struct intel_region *region = calloc(1, sizeof(*region));
if (!region)
return NULL;
/* Nothing will be able to use this miptree with the BO if the offset isn't
* aligned.
*/
if (tiling != I915_TILING_NONE)
assert(offset % 4096 == 0);
/* miptrees can't handle negative pitch. If you need flipping of images,
* that's outside of the scope of the mt.
*/
assert(pitch >= 0);
mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
0, 0,
width, height, 1);
if (!mt) {
free(region);
return mt;
}
region->cpp = mt->cpp;
region->width = width;
region->height = height;
region->pitch = pitch;
region->refcount = 1;
drm_intel_bo_reference(bo);
region->bo = bo;
region->tiling = tiling;
mt->region = region;
mt->offset = offset;
return mt;
}
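/* Editor's sketch (hypothetical): wrapping an externally owned linear BO,
 * as the PBO readback path in intel_pixel_read.c does.  An offset of 0
 * trivially satisfies the 4096-byte alignment rule asserted above for
 * tiled BOs.
 */
static struct intel_mipmap_tree *
example_wrap_linear_bo(struct intel_context *intel, drm_intel_bo *bo,
                       uint32_t width, uint32_t height, int pitch)
{
   return intel_miptree_create_for_bo(intel, bo,
                                      MESA_FORMAT_B8G8R8A8_UNORM,
                                      0 /* offset */, width, height, pitch,
                                      I915_TILING_NONE);
}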
/**
* Wraps the given region with a miptree.
*/
struct intel_mipmap_tree *
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
unsigned dri_attachment,
mesa_format format,
struct intel_region *region)
{
struct intel_mipmap_tree *mt = NULL;
/* Only the front and back buffers, which are color buffers, are shared
* through DRI2.
*/
assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
assert(_mesa_get_format_base_format(format) == GL_RGB ||
_mesa_get_format_base_format(format) == GL_RGBA);
mt = intel_miptree_create_for_bo(intel,
region->bo,
format,
0,
region->width,
region->height,
region->pitch,
region->tiling);
if (!mt)
return NULL;
mt->region->name = region->name;
return mt;
}
/**
* Wraps the given region with a miptree.
*/
struct intel_mipmap_tree *
intel_miptree_create_for_image_buffer(struct intel_context *intel,
enum __DRIimageBufferMask buffer_type,
mesa_format format,
uint32_t num_samples,
struct intel_region *region)
{
struct intel_mipmap_tree *mt = NULL;
/* Only the front and back buffers, which are color buffers, are allocated
* through the image loader.
*/
assert(_mesa_get_format_base_format(format) == GL_RGB ||
_mesa_get_format_base_format(format) == GL_RGBA);
mt = intel_miptree_create_for_bo(intel,
region->bo,
format,
0,
region->width,
region->height,
region->pitch,
region->tiling);
return mt;
}
struct intel_mipmap_tree *
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
mesa_format format,
uint32_t width,
uint32_t height)
{
uint32_t depth = 1;
return intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
width, height, depth, true,
INTEL_MIPTREE_TILING_ANY);
}
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
struct intel_mipmap_tree *src)
{
if (*dst == src)
return;
intel_miptree_release(dst);
if (src) {
src->refcount++;
DBG("%s %p refcount now %d\n", __func__, src, src->refcount);
}
*dst = src;
}
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
if (!*mt)
return;
DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1);
if (--(*mt)->refcount <= 0) {
GLuint i;
DBG("%s deleting %p\n", __func__, *mt);
intel_region_release(&((*mt)->region));
for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
free((*mt)->level[i].slice);
}
free(*mt);
}
*mt = NULL;
}
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
int *width, int *height, int *depth)
{
switch (image->TexObject->Target) {
default:
*width = image->Width;
*height = image->Height;
*depth = image->Depth;
break;
}
}
/**
* Can the image be pulled into a unified mipmap tree? This mirrors
* the completeness test in a lot of ways.
*
* Not sure whether I want to pass gl_texture_image here.
*/
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
struct gl_texture_image *image)
{
struct intel_texture_image *intelImage = intel_texture_image(image);
GLuint level = intelImage->base.Base.Level;
int width, height, depth;
/* glTexImage* choose the texture object based on the target passed in, and
* objects can't change targets over their lifetimes, so this should be
* true.
*/
assert(target_to_target(image->TexObject->Target) == mt->target);
mesa_format mt_format = mt->format;
if (image->TexFormat != mt_format)
return false;
intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
if (mt->target == GL_TEXTURE_CUBE_MAP)
depth = 6;
/* Test image dimensions against the base level image adjusted for
* minification. This will also catch images not present in the
* tree, changed targets, etc.
*/
if (width != mt->level[level].width ||
height != mt->level[level].height ||
depth != mt->level[level].depth) {
return false;
}
return true;
}
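/* Editor's note (worked example, not from the original source): for a tree
 * whose base level is 256x256, the layout code records level 2 as 64x64,
 * so a glTexImage2D(level = 2) upload of any other size, or with a
 * different TexFormat, fails the test above and forces a new tree.
 */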
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
GLuint level,
GLuint x, GLuint y,
GLuint w, GLuint h, GLuint d)
{
mt->level[level].width = w;
mt->level[level].height = h;
mt->level[level].depth = d;
mt->level[level].level_x = x;
mt->level[level].level_y = y;
DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __func__,
level, w, h, d, x, y);
assert(mt->level[level].slice == NULL);
mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
mt->level[level].slice[0].x_offset = mt->level[level].level_x;
mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
GLuint level, GLuint img,
GLuint x, GLuint y)
{
if (img == 0 && level == 0)
assert(x == 0 && y == 0);
assert(img < mt->level[level].depth);
mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;
DBG("%s level %d img %d pos %d,%d\n",
__func__, level, img,
mt->level[level].slice[img].x_offset,
mt->level[level].slice[img].y_offset);
}
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
GLuint level, GLuint slice,
GLuint *x, GLuint *y)
{
assert(slice < mt->level[level].depth);
*x = mt->level[level].slice[slice].x_offset;
*y = mt->level[level].slice[slice].y_offset;
}
/**
* Rendering with tiled buffers requires that the base address of the buffer
* be aligned to a page boundary. For renderbuffers, and sometimes with
* textures, we may want the surface to point at a texture image level that
* isn't at a page boundary.
*
* This function returns an appropriately-aligned base offset
* according to the tiling restrictions, plus any required x/y offset
* from there.
*/
uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
GLuint level, GLuint slice,
uint32_t *tile_x,
uint32_t *tile_y)
{
struct intel_region *region = mt->region;
uint32_t x, y;
uint32_t mask_x, mask_y;
intel_region_get_tile_masks(region, &mask_x, &mask_y);
intel_miptree_get_image_offset(mt, level, slice, &x, &y);
*tile_x = x & mask_x;
*tile_y = y & mask_y;
return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y);
}
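/* Editor's illustration (assumed numbers): with X tiling and cpp = 4,
 * intel_region_get_tile_masks() would yield mask_x = 127 and mask_y = 7
 * (a 512-byte-by-8-row tile).  A slice at image offset (130, 10) then
 * returns tile_x = 130 & 127 = 2 and tile_y = 10 & 7 = 2, with the
 * tile-aligned remainder (128, 8) folded into the byte offset computed by
 * intel_region_get_aligned_offset().
 */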
static void
intel_miptree_copy_slice_sw(struct intel_context *intel,
struct intel_mipmap_tree *dst_mt,
struct intel_mipmap_tree *src_mt,
int level,
int slice,
int width,
int height)
{
void *src, *dst;
int src_stride, dst_stride;
int cpp = dst_mt->cpp;
intel_miptree_map(intel, src_mt,
level, slice,
0, 0,
width, height,
GL_MAP_READ_BIT,
&src, &src_stride);
intel_miptree_map(intel, dst_mt,
level, slice,
0, 0,
width, height,
GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
&dst, &dst_stride);
DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
_mesa_get_format_name(src_mt->format),
src_mt, src, src_stride,
_mesa_get_format_name(dst_mt->format),
dst_mt, dst, dst_stride,
width, height);
int row_size = cpp * width;
if (src_stride == row_size &&
dst_stride == row_size) {
memcpy(dst, src, row_size * height);
} else {
for (int i = 0; i < height; i++) {
memcpy(dst, src, row_size);
dst += dst_stride;
src += src_stride;
}
}
intel_miptree_unmap(intel, dst_mt, level, slice);
intel_miptree_unmap(intel, src_mt, level, slice);
}
static void
intel_miptree_copy_slice(struct intel_context *intel,
struct intel_mipmap_tree *dst_mt,
struct intel_mipmap_tree *src_mt,
int level,
int face,
int depth)
{
mesa_format format = src_mt->format;
uint32_t width = src_mt->level[level].width;
uint32_t height = src_mt->level[level].height;
int slice;
if (face > 0)
slice = face;
else
slice = depth;
assert(depth < src_mt->level[level].depth);
assert(src_mt->format == dst_mt->format);
if (dst_mt->compressed) {
height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
width = ALIGN(width, dst_mt->align_w);
}
uint32_t dst_x, dst_y, src_x, src_y;
intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);
DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
_mesa_get_format_name(src_mt->format),
src_mt, src_x, src_y, src_mt->region->pitch,
_mesa_get_format_name(dst_mt->format),
dst_mt, dst_x, dst_y, dst_mt->region->pitch,
width, height);
if (!intel_miptree_blit(intel,
src_mt, level, slice, 0, 0, false,
dst_mt, level, slice, 0, 0, false,
width, height, COLOR_LOGICOP_COPY)) {
perf_debug("miptree validate blit for %s failed\n",
_mesa_get_format_name(format));
intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
width, height);
}
}
/**
* Copies the image's current data to the given miptree, and associates that
* miptree with the image.
*
* If \c invalidate is true, then the actual image data does not need to be
* copied, but the image still needs to be associated to the new miptree (this
* is set to true if we're about to clear the image).
*/
void
intel_miptree_copy_teximage(struct intel_context *intel,
struct intel_texture_image *intelImage,
struct intel_mipmap_tree *dst_mt,
bool invalidate)
{
struct intel_mipmap_tree *src_mt = intelImage->mt;
struct intel_texture_object *intel_obj =
intel_texture_object(intelImage->base.Base.TexObject);
int level = intelImage->base.Base.Level;
int face = intelImage->base.Base.Face;
GLuint depth = intelImage->base.Base.Depth;
if (!invalidate) {
for (int slice = 0; slice < depth; slice++) {
intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
}
}
intel_miptree_reference(&intelImage->mt, dst_mt);
intel_obj->needs_validate = true;
}
void *
intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
{
drm_intel_bo *bo = mt->region->bo;
if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
if (drm_intel_bo_busy(bo)) {
perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
}
}
intel_flush(&intel->ctx);
if (mt->region->tiling != I915_TILING_NONE)
drm_intel_gem_bo_map_gtt(bo);
else
drm_intel_bo_map(bo, true);
return bo->virtual;
}
void
intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
{
drm_intel_bo_unmap(mt->region->bo);
}
static void
intel_miptree_map_gtt(struct intel_context *intel,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level, unsigned int slice)
{
unsigned int bw, bh;
void *base;
unsigned int image_x, image_y;
int x = map->x;
int y = map->y;
/* For compressed formats, the stride is the number of bytes per
* row of blocks. intel_miptree_get_image_offset() already does
* the divide.
*/
_mesa_get_format_block_size(mt->format, &bw, &bh);
assert(y % bh == 0);
y /= bh;
base = intel_miptree_map_raw(intel, mt) + mt->offset;
if (base == NULL)
map->ptr = NULL;
else {
/* Note that in the case of cube maps, the caller must have passed the
* slice number referencing the face.
*/
intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
x += image_x;
y += image_y;
map->stride = mt->region->pitch;
map->ptr = base + y * map->stride + x * mt->cpp;
}
DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
map->x, map->y, map->w, map->h,
mt, _mesa_get_format_name(mt->format),
x, y, map->ptr, map->stride);
}
static void
intel_miptree_unmap_gtt(struct intel_mipmap_tree *mt)
{
intel_miptree_unmap_raw(mt);
}
static void
intel_miptree_map_blit(struct intel_context *intel,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level, unsigned int slice)
{
map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
0, 0,
map->w, map->h, 1,
false,
INTEL_MIPTREE_TILING_NONE);
if (!map->mt) {
fprintf(stderr, "Failed to allocate blit temporary\n");
goto fail;
}
map->stride = map->mt->region->pitch;
if (!intel_miptree_blit(intel,
mt, level, slice,
map->x, map->y, false,
map->mt, 0, 0,
0, 0, false,
map->w, map->h, COLOR_LOGICOP_COPY)) {
fprintf(stderr, "Failed to blit\n");
goto fail;
}
intel_batchbuffer_flush(intel);
map->ptr = intel_miptree_map_raw(intel, map->mt);
DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
map->x, map->y, map->w, map->h,
mt, _mesa_get_format_name(mt->format),
level, slice, map->ptr, map->stride);
return;
fail:
intel_miptree_release(&map->mt);
map->ptr = NULL;
map->stride = 0;
}
static void
intel_miptree_unmap_blit(struct intel_context *intel,
struct intel_mipmap_tree *mt,
struct intel_miptree_map *map,
unsigned int level,
unsigned int slice)
{
struct gl_context *ctx = &intel->ctx;
intel_miptree_unmap_raw(map->mt);
if (map->mode & GL_MAP_WRITE_BIT) {
bool ok = intel_miptree_blit(intel,
map->mt, 0, 0,
0, 0, false,
mt, level, slice,
map->x, map->y, false,
map->w, map->h, COLOR_LOGICOP_COPY);
WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
}
intel_miptree_release(&map->mt);
}
/**
* Create and attach a map to the miptree at (level, slice). Return the
* attached map.
*/
static struct intel_miptree_map*
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice,
unsigned int x,
unsigned int y,
unsigned int w,
unsigned int h,
GLbitfield mode)
{
struct intel_miptree_map *map = calloc(1, sizeof(*map));
if (!map)
return NULL;
assert(mt->level[level].slice[slice].map == NULL);
mt->level[level].slice[slice].map = map;
map->mode = mode;
map->x = x;
map->y = y;
map->w = w;
map->h = h;
return map;
}
/**
* Release the map at (level, slice).
*/
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice)
{
struct intel_miptree_map **map;
map = &mt->level[level].slice[slice].map;
free(*map);
*map = NULL;
}
void
intel_miptree_map(struct intel_context *intel,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice,
unsigned int x,
unsigned int y,
unsigned int w,
unsigned int h,
GLbitfield mode,
void **out_ptr,
int *out_stride)
{
struct intel_miptree_map *map;
map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
if (!map) {
*out_ptr = NULL;
*out_stride = 0;
return;
}
/* See intel_miptree_blit() for details on the 32k pitch limit. */
if (mt->region->tiling != I915_TILING_NONE &&
mt->region->bo->size >= intel->max_gtt_map_object_size) {
assert(mt->region->pitch < 32768);
intel_miptree_map_blit(intel, mt, map, level, slice);
} else {
intel_miptree_map_gtt(intel, mt, map, level, slice);
}
*out_ptr = map->ptr;
*out_stride = map->stride;
if (map->ptr == NULL)
intel_miptree_release_map(mt, level, slice);
}
void
intel_miptree_unmap(struct intel_context *intel,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice)
{
struct intel_miptree_map *map = mt->level[level].slice[slice].map;
if (!map)
return;
DBG("%s: mt %p (%s) level %d slice %d\n", __func__,
mt, _mesa_get_format_name(mt->format), level, slice);
if (map->mt) {
intel_miptree_unmap_blit(intel, mt, map, level, slice);
} else {
intel_miptree_unmap_gtt(mt);
}
intel_miptree_release_map(mt, level, slice);
}
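/* Editor's sketch (hypothetical caller): reading back a 16x16 block of
 * level 0, slice 0.  Depending on the size/tiling checks above, the map
 * goes either directly through the GTT or via a temporary linear blit
 * copy, but the caller is insulated from that choice.
 */
static void
example_read_block(struct intel_context *intel, struct intel_mipmap_tree *mt)
{
   void *ptr;
   int stride;
   intel_miptree_map(intel, mt, 0, 0, 0, 0, 16, 16,
                     GL_MAP_READ_BIT, &ptr, &stride);
   if (ptr) {
      /* Row i of the block starts at (char *)ptr + i * stride. */
      intel_miptree_unmap(intel, mt, 0, 0);
   }
}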

View File

@ -1,345 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_MIPMAP_TREE_H
#define INTEL_MIPMAP_TREE_H
#include <assert.h>
#include "intel_screen.h"
#include "intel_regions.h"
#include "GL/internal/dri_interface.h"
/* A layer on top of the intel_regions code which adds:
*
* - Code to size and layout a region to hold a set of mipmaps.
* - Query to determine if a new image fits in an existing tree.
* - More refcounting
* - maybe able to remove refcounting from intel_region?
* - ?
*
 * The fixed mipmap layout of intel hardware, where one offset
 * specifies the position of all images in a mipmap hierarchy,
 * complicates the implementation of GL texture image commands,
* compared to hardware where each image is specified with an
* independent offset.
*
* In an ideal world, each texture object would be associated with a
* single bufmgr buffer or 2d intel_region, and all the images within
* the texture object would slot into the tree as they arrive. The
* reality can be a little messier, as images can arrive from the user
* with sizes that don't fit in the existing tree, or in an order
* where the tree layout cannot be guessed immediately.
*
* This structure encodes an idealized mipmap tree. The GL image
* commands build these where possible, otherwise store the images in
* temporary system buffers.
*/
struct intel_texture_image;
struct intel_miptree_map {
/** Bitfield of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT, GL_MAP_INVALIDATE_RANGE_BIT */
GLbitfield mode;
/** Region of interest for the map. */
int x, y, w, h;
/** Possibly malloced temporary buffer for the mapping. */
void *buffer;
/** Possible pointer to a temporary linear miptree for the mapping. */
struct intel_mipmap_tree *mt;
/** Pointer to the start of (map_x, map_y) returned by the mapping. */
void *ptr;
/** Stride of the mapping. */
int stride;
};
/**
* Describes the location of each texture image within a texture region.
*/
struct intel_mipmap_level
{
/** Offset to this miptree level, used in computing x_offset. */
GLuint level_x;
/** Offset to this miptree level, used in computing y_offset. */
GLuint level_y;
GLuint width;
GLuint height;
/**
* \brief Number of 2D slices in this miplevel.
*
 * The exact semantics of depth vary according to the texture target:
* - For GL_TEXTURE_CUBE_MAP, depth is 6.
* - For GL_TEXTURE_3D, it is the texture's depth at this miplevel. Its
* value, like width and height, varies with miplevel.
* - For other texture types, depth is 1.
*/
GLuint depth;
/**
* \brief List of 2D images in this mipmap level.
*
* This may be a list of cube faces, array slices in 2D array texture, or
* layers in a 3D texture. The list's length is \c depth.
*/
struct intel_mipmap_slice {
/**
* \name Offset to slice
* \{
*
 * Hardware formats are so diverse that there is no unified way to
* compute the slice offsets, so we store them in this table.
*
 * The (x, y) offset to slice \c s at level \c l relative to the miptree's
* base address is
* \code
* x = mt->level[l].slice[s].x_offset
* y = mt->level[l].slice[s].y_offset
*/
GLuint x_offset;
GLuint y_offset;
/** \} */
/**
* Mapping information. Persistent for the duration of
* intel_miptree_map/unmap on this slice.
*/
struct intel_miptree_map *map;
} *slice;
};
struct intel_mipmap_tree
{
/* Effectively the key:
*/
GLenum target;
/**
* This is just the same as the gl_texture_image->TexFormat or
* gl_renderbuffer->Format.
*/
mesa_format format;
/**
* The X offset of each image in the miptree must be aligned to this. See
* the "Alignment Unit Size" section of the BSpec.
*/
unsigned int align_w;
unsigned int align_h; /**< \see align_w */
GLuint first_level;
GLuint last_level;
/**
* Level zero image dimensions. These dimensions correspond to the
* physical layout of data in memory. Accordingly, they account for the
* extra factor of 6 in depth that must be allocated in order to
* accommodate cubemap textures.
*/
GLuint physical_width0, physical_height0, physical_depth0;
GLuint cpp;
bool compressed;
/* Derived from the above:
*/
GLuint total_width;
GLuint total_height;
/* Includes image offset tables:
*/
struct intel_mipmap_level level[MAX_TEXTURE_LEVELS];
/* The data is held here:
*/
struct intel_region *region;
/* Offset into region bo where miptree starts:
*/
uint32_t offset;
/* These are also refcounted:
*/
GLuint refcount;
};
enum intel_miptree_tiling_mode {
INTEL_MIPTREE_TILING_ANY,
INTEL_MIPTREE_TILING_Y,
INTEL_MIPTREE_TILING_NONE,
};
struct intel_mipmap_tree *intel_miptree_create(struct intel_context *intel,
GLenum target,
mesa_format format,
GLuint first_level,
GLuint last_level,
GLuint width0,
GLuint height0,
GLuint depth0,
bool expect_accelerated_upload,
enum intel_miptree_tiling_mode);
struct intel_mipmap_tree *
intel_miptree_create_layout(struct intel_context *intel,
GLenum target,
mesa_format format,
GLuint first_level,
GLuint last_level,
GLuint width0,
GLuint height0,
GLuint depth0);
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct intel_context *intel,
drm_intel_bo *bo,
mesa_format format,
uint32_t offset,
uint32_t width,
uint32_t height,
int pitch,
uint32_t tiling);
struct intel_mipmap_tree*
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
unsigned dri_attachment,
mesa_format format,
struct intel_region *region);
struct intel_mipmap_tree*
intel_miptree_create_for_image_buffer(struct intel_context *intel,
enum __DRIimageBufferMask buffer_type,
mesa_format format,
uint32_t num_samples,
struct intel_region *region);
/**
* Create a miptree appropriate as the storage for a non-texture renderbuffer.
* The miptree has the following properties:
* - The target is GL_TEXTURE_2D.
* - There are no levels other than the base level 0.
* - Depth is 1.
*/
struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
mesa_format format,
uint32_t width,
uint32_t height);
/** \brief Assert that the level and layer are valid for the miptree. */
static inline void
intel_miptree_check_level_layer(struct intel_mipmap_tree *mt,
uint32_t level,
uint32_t layer)
{
(void) mt;
(void) level;
(void) layer;
assert(level >= mt->first_level);
assert(level <= mt->last_level);
assert(layer < mt->level[level].depth);
}
int intel_miptree_pitch_align (struct intel_context *intel,
struct intel_mipmap_tree *mt,
uint32_t tiling,
int pitch);
void intel_miptree_reference(struct intel_mipmap_tree **dst,
struct intel_mipmap_tree *src);
void intel_miptree_release(struct intel_mipmap_tree **mt);
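/* Editor's sketch (hypothetical) of the refcounting idiom this API expects:
 * each holder keeps its own reference to a shared tree and drops it when
 * done; the tree is freed on the last release.
 */
static inline void
example_adopt_tree(struct intel_mipmap_tree **slot,
                   struct intel_mipmap_tree *shared_mt)
{
   intel_miptree_reference(slot, shared_mt); /* bumps shared_mt->refcount */
   /* ... use *slot ... */
   intel_miptree_release(slot);              /* may free on last release */
}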
/* Check if an image fits an existing mipmap tree layout
*/
bool intel_miptree_match_image(struct intel_mipmap_tree *mt,
struct gl_texture_image *image);
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
GLuint level, GLuint slice,
GLuint *x, GLuint *y);
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
int *width, int *height, int *depth);
uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
GLuint level, GLuint slice,
uint32_t *tile_x,
uint32_t *tile_y);
void intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
GLuint level,
GLuint x, GLuint y,
GLuint w, GLuint h, GLuint d);
void intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
GLuint level,
GLuint img, GLuint x, GLuint y);
void
intel_miptree_copy_teximage(struct intel_context *intel,
struct intel_texture_image *intelImage,
struct intel_mipmap_tree *dst_mt, bool invalidate);
/**\}*/
/* i915_mipmap_tree.c:
*/
void i915_miptree_layout(struct intel_mipmap_tree *mt);
void i945_miptree_layout(struct intel_mipmap_tree *mt);
void *intel_miptree_map_raw(struct intel_context *intel,
struct intel_mipmap_tree *mt);
void intel_miptree_unmap_raw(struct intel_mipmap_tree *mt);
void
intel_miptree_map(struct intel_context *intel,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice,
unsigned int x,
unsigned int y,
unsigned int w,
unsigned int h,
GLbitfield mode,
void **out_ptr,
int *out_stride);
void
intel_miptree_unmap(struct intel_context *intel,
struct intel_mipmap_tree *mt,
unsigned int level,
unsigned int slice);
#endif

View File

@ -1,132 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/accum.h"
#include "main/enums.h"
#include "main/state.h"
#include "main/stencil.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "swrast/swrast.h"
#include "intel_context.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#define FILE_DEBUG_FLAG DEBUG_PIXEL
static GLenum
effective_func(GLenum func, bool src_alpha_is_one)
{
if (src_alpha_is_one) {
if (func == GL_SRC_ALPHA)
return GL_ONE;
if (func == GL_ONE_MINUS_SRC_ALPHA)
return GL_ZERO;
}
return func;
}
/**
 * Check if any fragment operations are in effect which might affect
* glDraw/CopyPixels.
*/
bool
intel_check_blit_fragment_ops(struct gl_context * ctx, bool src_alpha_is_one)
{
if (ctx->NewState)
_mesa_update_state(ctx);
if (_mesa_arb_fragment_program_enabled(ctx)) {
DBG("fallback due to fragment program\n");
return false;
}
if (ctx->Color.BlendEnabled &&
(effective_func(ctx->Color.Blend[0].SrcRGB, src_alpha_is_one) != GL_ONE ||
effective_func(ctx->Color.Blend[0].DstRGB, src_alpha_is_one) != GL_ZERO ||
ctx->Color.Blend[0].EquationRGB != GL_FUNC_ADD ||
effective_func(ctx->Color.Blend[0].SrcA, src_alpha_is_one) != GL_ONE ||
effective_func(ctx->Color.Blend[0].DstA, src_alpha_is_one) != GL_ZERO ||
ctx->Color.Blend[0].EquationA != GL_FUNC_ADD)) {
DBG("fallback due to blend\n");
return false;
}
if (ctx->Texture._MaxEnabledTexImageUnit != -1) {
DBG("fallback due to texturing\n");
return false;
}
if (GET_COLORMASK(ctx->Color.ColorMask, 0) != 0xf) {
DBG("fallback due to color masking\n");
return false;
}
if (ctx->Color.AlphaEnabled) {
DBG("fallback due to alpha\n");
return false;
}
if (ctx->Depth.Test) {
DBG("fallback due to depth test\n");
return false;
}
if (ctx->Fog.Enabled) {
DBG("fallback due to fog\n");
return false;
}
if (ctx->_ImageTransferState) {
DBG("fallback due to image transfer\n");
return false;
}
if (_mesa_stencil_is_enabled(ctx)) {
DBG("fallback due to image stencil\n");
return false;
}
if (ctx->RenderMode != GL_RENDER) {
DBG("fallback due to render mode\n");
return false;
}
return true;
}
void
intelInitPixelFuncs(struct dd_function_table *functions)
{
functions->Bitmap = intelBitmap;
functions->CopyPixels = intelCopyPixels;
functions->DrawPixels = intelDrawPixels;
functions->ReadPixels = intelReadPixels;
}

View File

@ -1,63 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_PIXEL_H
#define INTEL_PIXEL_H
#include "main/mtypes.h"
void intelInitPixelFuncs(struct dd_function_table *functions);
bool intel_check_blit_fragment_ops(struct gl_context * ctx,
bool src_alpha_is_one);
void intelReadPixels(struct gl_context * ctx,
GLint x, GLint y,
GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack,
GLvoid * pixels);
void intelDrawPixels(struct gl_context * ctx,
GLint x, GLint y,
GLsizei width, GLsizei height,
GLenum format,
GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid * pixels);
void intelCopyPixels(struct gl_context * ctx,
GLint srcx, GLint srcy,
GLsizei width, GLsizei height,
GLint destx, GLint desty, GLenum type);
void intelBitmap(struct gl_context * ctx,
GLint x, GLint y,
GLsizei width, GLsizei height,
const struct gl_pixelstore_attrib *unpack,
const GLubyte * pixels);
#endif

View File

@ -1,359 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/glheader.h"
#include "main/enums.h"
#include "main/image.h"
#include "main/colormac.h"
#include "main/condrender.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/pbo.h"
#include "main/bufferobj.h"
#include "main/state.h"
#include "main/texobj.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_regions.h"
#include "intel_buffers.h"
#include "intel_pixel.h"
#include "intel_reg.h"
#define FILE_DEBUG_FLAG DEBUG_PIXEL
/* Unlike the other intel_pixel_* functions, the expectation here is
* that the incoming data is not in a PBO. With the XY_TEXT blit
 * method, there's no benefit to having it in a PBO, but we could
* implement a path based on XY_MONO_SRC_COPY_BLIT which might benefit
* PBO bitmaps. I think they are probably pretty rare though - I
* wonder if Xgl uses them?
*/
static const GLubyte *map_pbo( struct gl_context *ctx,
GLsizei width, GLsizei height,
const struct gl_pixelstore_attrib *unpack,
const GLubyte *bitmap )
{
GLubyte *buf;
if (!_mesa_validate_pbo_access(2, unpack, width, height, 1,
GL_COLOR_INDEX, GL_BITMAP,
INT_MAX, (const GLvoid *) bitmap)) {
_mesa_error(ctx, GL_INVALID_OPERATION,"glBitmap(invalid PBO access)");
return NULL;
}
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
GL_MAP_READ_BIT,
unpack->BufferObj,
MAP_INTERNAL);
if (!buf) {
_mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
return NULL;
}
return ADD_POINTERS(buf, bitmap);
}
static bool test_bit( const GLubyte *src, GLuint bit )
{
return (src[bit/8] & (1<<(bit % 8))) ? 1 : 0;
}
static void set_bit( GLubyte *dest, GLuint bit )
{
dest[bit/8] |= 1 << (bit % 8);
}
/* Extract a rectangle's worth of data from the bitmap. Called
* per chunk of HW-sized bitmap.
*/
static GLuint get_bitmap_rect(GLsizei width, GLsizei height,
const struct gl_pixelstore_attrib *unpack,
const GLubyte *bitmap,
GLuint x, GLuint y,
GLuint w, GLuint h,
GLubyte *dest,
GLuint row_align,
bool invert)
{
GLuint src_offset = (x + unpack->SkipPixels) & 0x7;
GLuint mask = unpack->LsbFirst ? 0 : 7;
GLuint bit = 0;
GLint row, col;
GLint first, last;
GLint incr;
GLuint count = 0;
DBG("%s %d,%d %dx%d bitmap %dx%d skip %d src_offset %d mask %d\n",
__func__, x,y,w,h,width,height,unpack->SkipPixels, src_offset, mask);
if (invert) {
first = h-1;
last = 0;
incr = -1;
}
else {
first = 0;
last = h-1;
incr = 1;
}
/* Require that dest be pre-zero'd.
*/
for (row = first; row != (last+incr); row += incr) {
const GLubyte *rowsrc = _mesa_image_address2d(unpack, bitmap,
width, height,
GL_COLOR_INDEX, GL_BITMAP,
y + row, x);
for (col = 0; col < w; col++, bit++) {
if (test_bit(rowsrc, (col + src_offset) ^ mask)) {
set_bit(dest, bit ^ 7);
count++;
}
}
if (row_align)
bit = ALIGN(bit, row_align);
}
return count;
}
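/* Editor's note (worked example): with w = 10 and row_align = 8, each
 * output row consumes ALIGN(10, 8) = 16 bits, so every row of `dest`
 * starts on a byte boundary, which is the layout do_blit_bitmap() below
 * relies on when feeding chunks to intelEmitImmediateColorExpandBlit().
 */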
/**
* Returns the low Y value of the vertical range given, flipped according to
 * whether the framebuffer is a y-flipped window-system framebuffer or not.
*/
static inline int
y_flip(struct gl_framebuffer *fb, int y, int height)
{
if (_mesa_is_user_fbo(fb))
return y;
else
return fb->Height - y - height;
}
/*
* Render a bitmap.
*/
static bool
do_blit_bitmap( struct gl_context *ctx,
GLint dstx, GLint dsty,
GLsizei width, GLsizei height,
const struct gl_pixelstore_attrib *unpack,
const GLubyte *bitmap )
{
struct intel_context *intel = intel_context(ctx);
struct gl_framebuffer *fb = ctx->DrawBuffer;
struct intel_renderbuffer *irb;
GLfloat tmpColor[4];
GLubyte ubcolor[4];
GLuint color;
GLsizei bitmap_width = width;
GLsizei bitmap_height = height;
GLint px, py;
GLuint stipple[32];
GLint orig_dstx = dstx;
GLint orig_dsty = dsty;
/* Update draw buffer bounds */
_mesa_update_state(ctx);
if (ctx->Depth.Test) {
/* The blit path produces incorrect results when depth testing is on.
* It seems the blit Z coord is always 1.0 (the far plane) so fragments
* will likely be obscured by other, closer geometry.
*/
return false;
}
intel_prepare_render(intel);
if (fb->_NumColorDrawBuffers != 1) {
perf_debug("accelerated glBitmap() only supports rendering to a "
"single color buffer\n");
return false;
}
irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
if (unpack->BufferObj) {
bitmap = map_pbo(ctx, width, height, unpack, bitmap);
if (bitmap == NULL)
return true; /* even though this is an error, we're done */
}
COPY_4V(tmpColor, ctx->Current.RasterColor);
if (_mesa_need_secondary_color(ctx)) {
ADD_3V(tmpColor, tmpColor, ctx->Current.RasterSecondaryColor);
}
UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[0], tmpColor[0]);
UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[1], tmpColor[1]);
UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[2], tmpColor[2]);
UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[3], tmpColor[3]);
switch (irb->mt->format) {
case MESA_FORMAT_B8G8R8A8_UNORM:
case MESA_FORMAT_B8G8R8X8_UNORM:
color = PACK_COLOR_8888(ubcolor[3], ubcolor[0], ubcolor[1], ubcolor[2]);
break;
case MESA_FORMAT_B5G6R5_UNORM:
color = PACK_COLOR_565(ubcolor[0], ubcolor[1], ubcolor[2]);
break;
default:
perf_debug("Unsupported format %s in accelerated glBitmap()\n",
_mesa_get_format_name(irb->mt->format));
return false;
}
if (!intel_check_blit_fragment_ops(ctx, tmpColor[3] == 1.0F))
return false;
/* Clip to buffer bounds and scissor. */
if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
fb->_Xmax, fb->_Ymax,
&dstx, &dsty, &width, &height))
goto out;
dsty = y_flip(fb, dsty, height);
#define DY 32
#define DX 32
/* Chop it all into chunks that can be digested by hardware: */
for (py = 0; py < height; py += DY) {
for (px = 0; px < width; px += DX) {
int h = MIN2(DY, height - py);
int w = MIN2(DX, width - px);
GLuint sz = ALIGN(ALIGN(w,8) * h, 64)/8;
const enum gl_logicop_mode logic_op = ctx->Color.ColorLogicOpEnabled ?
ctx->Color._LogicOp : COLOR_LOGICOP_COPY;
assert(sz <= sizeof(stipple));
memset(stipple, 0, sz);
/* May need to adjust this when padding has been introduced in
* sz above:
*
* Have to translate destination coordinates back into source
* coordinates.
*/
int count = get_bitmap_rect(bitmap_width, bitmap_height, unpack,
bitmap,
-orig_dstx + (dstx + px),
-orig_dsty + y_flip(fb, dsty + py, h),
w, h,
(GLubyte *)stipple,
8,
_mesa_is_winsys_fbo(fb));
if (count == 0)
continue;
if (!intelEmitImmediateColorExpandBlit(intel,
irb->mt->cpp,
(GLubyte *)stipple,
sz,
color,
irb->mt->region->pitch,
irb->mt->region->bo,
0,
irb->mt->region->tiling,
dstx + px,
dsty + py,
w, h,
logic_op)) {
return false;
}
if (ctx->Query.CurrentOcclusionObject)
ctx->Query.CurrentOcclusionObject->Result += count;
}
}
out:
if (unlikely(INTEL_DEBUG & DEBUG_SYNC))
intel_batchbuffer_flush(intel);
if (unpack->BufferObj) {
/* done with PBO so unmap it now */
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
}
intel_check_front_buffer_rendering(intel);
return true;
}
/* There are a large number of possible ways to implement bitmap on
* this hardware, most of them have some sort of drawback. Here are a
* few that spring to mind:
*
* Blit:
* - XY_MONO_SRC_BLT_CMD
* - use XY_SETUP_CLIP_BLT for cliprect clipping.
* - XY_TEXT_BLT
* - XY_TEXT_IMMEDIATE_BLT
* - blit per cliprect, subject to maximum immediate data size.
* - XY_COLOR_BLT
* - per pixel or run of pixels
* - XY_PIXEL_BLT
* - good for sparse bitmaps
*
* 3D engine:
* - Point per pixel
* - Translate bitmap to an alpha texture and render as a quad
* - Chop bitmap up into 32x32 squares and render w/polygon stipple.
*/
void
intelBitmap(struct gl_context * ctx,
GLint x, GLint y,
GLsizei width, GLsizei height,
const struct gl_pixelstore_attrib *unpack,
const GLubyte * pixels)
{
if (!_mesa_check_conditional_render(ctx))
return;
if (do_blit_bitmap(ctx, x, y, width, height,
unpack, pixels))
return;
_mesa_meta_Bitmap(ctx, x, y, width, height, unpack, pixels);
}

View File

@ -1,208 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/glheader.h"
#include "main/image.h"
#include "main/state.h"
#include "main/stencil.h"
#include "main/mtypes.h"
#include "main/condrender.h"
#include "main/fbobject.h"
#include "drivers/common/meta.h"
#include "intel_context.h"
#include "intel_buffers.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_pixel.h"
#include "intel_fbo.h"
#include "intel_blit.h"
#define FILE_DEBUG_FLAG DEBUG_PIXEL
/**
* CopyPixels with the blitter. Don't support zooming, pixel transfer, etc.
*/
static bool
do_blit_copypixels(struct gl_context * ctx,
GLint srcx, GLint srcy,
GLsizei width, GLsizei height,
GLint dstx, GLint dsty, GLenum type)
{
struct intel_context *intel = intel_context(ctx);
struct gl_framebuffer *fb = ctx->DrawBuffer;
struct gl_framebuffer *read_fb = ctx->ReadBuffer;
GLint orig_dstx;
GLint orig_dsty;
GLint orig_srcx;
GLint orig_srcy;
struct intel_renderbuffer *draw_irb = NULL;
struct intel_renderbuffer *read_irb = NULL;
/* Update draw buffer bounds */
_mesa_update_state(ctx);
switch (type) {
case GL_COLOR:
if (fb->_NumColorDrawBuffers != 1) {
perf_debug("glCopyPixels() fallback: MRT\n");
return false;
}
draw_irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
read_irb = intel_renderbuffer(read_fb->_ColorReadBuffer);
break;
case GL_DEPTH_STENCIL_EXT:
draw_irb = intel_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
read_irb =
intel_renderbuffer(read_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
break;
case GL_DEPTH:
perf_debug("glCopyPixels() fallback: GL_DEPTH\n");
return false;
case GL_STENCIL:
perf_debug("glCopyPixels() fallback: GL_STENCIL\n");
return false;
default:
perf_debug("glCopyPixels(): Unknown type\n");
return false;
}
if (!draw_irb) {
perf_debug("glCopyPixels() fallback: missing draw buffer\n");
return false;
}
if (!read_irb) {
perf_debug("glCopyPixels() fallback: missing read buffer\n");
return false;
}
if (ctx->_ImageTransferState) {
perf_debug("glCopyPixels(): Unsupported image transfer state\n");
return false;
}
if (ctx->Depth.Test) {
perf_debug("glCopyPixels(): Unsupported depth test state\n");
return false;
}
if (_mesa_stencil_is_enabled(ctx)) {
perf_debug("glCopyPixels(): Unsupported stencil test state\n");
return false;
}
if (ctx->Fog.Enabled ||
ctx->Texture._MaxEnabledTexImageUnit != -1 ||
_mesa_arb_fragment_program_enabled(ctx)) {
perf_debug("glCopyPixels(): Unsupported fragment shader state\n");
return false;
}
if (ctx->Color.AlphaEnabled ||
ctx->Color.BlendEnabled) {
perf_debug("glCopyPixels(): Unsupported blend state\n");
return false;
}
if (GET_COLORMASK(ctx->Color.ColorMask, 0) != 0xf) {
perf_debug("glCopyPixels(): Unsupported color mask state\n");
return false;
}
if (ctx->Pixel.ZoomX != 1.0F || ctx->Pixel.ZoomY != 1.0F) {
perf_debug("glCopyPixels(): Unsupported pixel zoom\n");
return false;
}
intel_prepare_render(intel);
intel_flush(&intel->ctx);
/* Clip to destination buffer. */
orig_dstx = dstx;
orig_dsty = dsty;
if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
fb->_Xmax, fb->_Ymax,
&dstx, &dsty, &width, &height))
goto out;
/* Adjust src coords for our post-clipped destination origin */
srcx += dstx - orig_dstx;
srcy += dsty - orig_dsty;
/* Clip to source buffer. */
orig_srcx = srcx;
orig_srcy = srcy;
if (!_mesa_clip_to_region(0, 0,
read_fb->Width, read_fb->Height,
&srcx, &srcy, &width, &height))
goto out;
/* Adjust dst coords for our post-clipped source origin */
dstx += srcx - orig_srcx;
dsty += srcy - orig_srcy;
if (!intel_miptree_blit(intel,
read_irb->mt, read_irb->mt_level, read_irb->mt_layer,
srcx, srcy, _mesa_is_winsys_fbo(read_fb),
draw_irb->mt, draw_irb->mt_level, draw_irb->mt_layer,
dstx, dsty, _mesa_is_winsys_fbo(fb),
width, height,
(ctx->Color.ColorLogicOpEnabled ?
ctx->Color._LogicOp : COLOR_LOGICOP_COPY))) {
DBG("%s: blit failure\n", __func__);
return false;
}
if (ctx->Query.CurrentOcclusionObject)
ctx->Query.CurrentOcclusionObject->Result += width * height;
out:
intel_check_front_buffer_rendering(intel);
DBG("%s: success\n", __func__);
return true;
}
void
intelCopyPixels(struct gl_context * ctx,
GLint srcx, GLint srcy,
GLsizei width, GLsizei height,
GLint destx, GLint desty, GLenum type)
{
DBG("%s\n", __func__);
if (!_mesa_check_conditional_render(ctx))
return;
if (do_blit_copypixels(ctx, srcx, srcy, width, height, destx, desty, type))
return;
/* this will use swrast if needed */
_mesa_meta_CopyPixels(ctx, srcx, srcy, width, height, destx, desty, type);
}

View File

@ -1,58 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/glheader.h"
#include "main/enums.h"
#include "main/image.h"
#include "main/mtypes.h"
#include "main/teximage.h"
#include "main/texobj.h"
#include "main/texstate.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"
#include "intel_context.h"
#include "intel_pixel.h"
void
intelDrawPixels(struct gl_context * ctx,
GLint x, GLint y,
GLsizei width, GLsizei height,
GLenum format,
GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid * pixels)
{
if (format == GL_STENCIL_INDEX) {
_swrast_DrawPixels(ctx, x, y, width, height, format, type,
unpack, pixels);
return;
}
_mesa_meta_DrawPixels(ctx, x, y, width, height, format, type,
unpack, pixels);
}

View File

@ -1,197 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/glheader.h"
#include "main/enums.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/fbobject.h"
#include "main/image.h"
#include "main/bufferobj.h"
#include "main/readpix.h"
#include "main/state.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_blit.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_pixel.h"
#include "intel_buffer_objects.h"
#define FILE_DEBUG_FLAG DEBUG_PIXEL
/* For many applications, the new ability to pull the source buffers
* back out of the GTT and then do the packing/conversion operations
* in software will be as much of an improvement as trying to get the
* blitter and/or texture engine to do the work.
*
* This step is gated on private backbuffers.
*
* Obviously the frontbuffer can't be pulled back, so that is either
* an argument for blit/texture readpixels, or for blitting to a
* temporary and then pulling that back.
*
* When the destination is a pbo, however, it's not clear if it is
* ever going to be pulled to main memory (though the access param
* will be a good hint). So it sounds like we do want to be able to
* choose between blit/texture implementation on the gpu and pullback
* and cpu-based copying.
*
* Unless you can magically turn client memory into a PBO for the
* duration of this call, there will be a cpu-based copying step in
* any case.
*/
static bool
do_blit_readpixels(struct gl_context * ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *dst = intel_buffer_object(pack->BufferObj);
GLuint dst_offset;
drm_intel_bo *dst_buffer;
GLint dst_x, dst_y;
GLuint dirty;
DBG("%s\n", __func__);
assert(pack->BufferObj);
struct gl_renderbuffer *rb = ctx->ReadBuffer->_ColorReadBuffer;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
if (ctx->_ImageTransferState ||
!_mesa_format_matches_format_and_type(irb->mt->format, format, type,
false, NULL)) {
DBG("%s - bad format for blit\n", __func__);
return false;
}
if (pack->SwapBytes || pack->LsbFirst) {
DBG("%s: bad packing params\n", __func__);
return false;
}
int dst_stride = _mesa_image_row_stride(pack, width, format, type);
bool dst_flip = false;
/* Mesa flips the dst_stride for pack->Invert, but we want our mt to have a
* normal dst_stride.
*/
if (pack->Invert) {
dst_stride = -dst_stride;
dst_flip = true;
}
dst_offset = (GLintptr)pixels;
dst_offset += _mesa_image_offset(2, pack, width, height,
format, type, 0, 0, 0);
if (!_mesa_clip_copytexsubimage(ctx,
&dst_x, &dst_y,
&x, &y,
&width, &height)) {
return true;
}
dirty = intel->front_buffer_dirty;
intel_prepare_render(intel);
intel->front_buffer_dirty = dirty;
dst_buffer = intel_bufferobj_buffer(intel, dst);
struct intel_mipmap_tree *pbo_mt =
intel_miptree_create_for_bo(intel,
dst_buffer,
irb->mt->format,
dst_offset,
width, height,
dst_stride, I915_TILING_NONE);
if (!intel_miptree_blit(intel,
irb->mt, irb->mt_level, irb->mt_layer,
x, y, _mesa_is_winsys_fbo(ctx->ReadBuffer),
pbo_mt, 0, 0,
0, 0, dst_flip,
width, height, COLOR_LOGICOP_COPY)) {
intel_miptree_release(&pbo_mt);
return false;
}
intel_miptree_release(&pbo_mt);
DBG("%s - DONE\n", __func__);
return true;
}
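/* Editor's note (worked example): for a PBO readback with 400-byte rows and
 * pack->Invert set, _mesa_image_row_stride() reports -400; the code above
 * restores a +400 stride for the wrapping miptree and instead sets
 * dst_flip = true so intel_miptree_blit() writes the rows in reverse order.
 */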
void
intelReadPixels(struct gl_context * ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
struct intel_context *intel = intel_context(ctx);
bool dirty;
intel_flush_rendering_to_batch(ctx);
DBG("%s\n", __func__);
if (pack->BufferObj) {
/* Using PBOs, so try the BLT based path. */
if (do_blit_readpixels(ctx, x, y, width, height, format, type, pack,
pixels)) {
return;
}
perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__);
}
/* glReadPixels() won't dirty the front buffer, so reset the dirty
* flag after calling intel_prepare_render(). */
dirty = intel->front_buffer_dirty;
intel_prepare_render(intel);
intel->front_buffer_dirty = dirty;
/* Update Mesa state before calling _mesa_readpixels().
* XXX this may not be needed since ReadPixels no longer uses the
* span code.
*/
if (ctx->NewState)
_mesa_update_state(ctx);
_mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);
/* There's an intel_prepare_render() call in intelSpanRenderStart(). */
intel->front_buffer_dirty = dirty;
}

View File

@ -1,237 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#define CMD_MI (0x0 << 29)
#define CMD_2D (0x2 << 29)
#define CMD_3D (0x3 << 29)
#define MI_NOOP (CMD_MI | 0)
#define MI_BATCH_BUFFER_END (CMD_MI | 0xA << 23)
#define MI_FLUSH (CMD_MI | (4 << 23))
#define FLUSH_MAP_CACHE (1 << 0)
#define INHIBIT_FLUSH_RENDER_CACHE (1 << 2)
#define MI_LOAD_REGISTER_IMM (CMD_MI | (0x22 << 23))
#define MI_FLUSH_DW (CMD_MI | (0x26 << 23) | 2)
/* Stalls command execution waiting for the given events to have occurred. */
#define MI_WAIT_FOR_EVENT (CMD_MI | (0x3 << 23))
#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_STORE_REGISTER_MEM (CMD_MI | (0x24 << 23))
# define MI_STORE_REGISTER_MEM_USE_GGTT (1 << 22)
/* p189 */
#define _3DSTATE_LOAD_STATE_IMMEDIATE_1 (CMD_3D | (0x1d<<24) | (0x04<<16))
#define I1_LOAD_S(n) (1<<(4+n))
#define _3DSTATE_DRAWRECT_INFO (CMD_3D | (0x1d<<24) | (0x80<<16) | 0x3)
/** @} */
/** @{
* 915 definitions
*
* 915 documents say that bits 31:28 and 1 are "undefined, must be zero."
*/
#define S0_VB_OFFSET_MASK 0x0ffffffc
#define S0_AUTO_CACHE_INV_DISABLE (1<<0)
/** @} */
/** @{
* 830 definitions
*/
#define S0_VB_OFFSET_MASK_830 0xffffff80
#define S0_VB_PITCH_SHIFT_830 1
#define S0_VB_ENABLE_830 (1<<0)
/** @} */
#define S1_VERTEX_WIDTH_SHIFT 24
#define S1_VERTEX_WIDTH_MASK (0x3f<<24)
#define S1_VERTEX_PITCH_SHIFT 16
#define S1_VERTEX_PITCH_MASK (0x3f<<16)
#define TEXCOORDFMT_2D 0x0
#define TEXCOORDFMT_3D 0x1
#define TEXCOORDFMT_4D 0x2
#define TEXCOORDFMT_1D 0x3
#define TEXCOORDFMT_2D_16 0x4
#define TEXCOORDFMT_4D_16 0x5
#define TEXCOORDFMT_NOT_PRESENT 0xf
#define S2_TEXCOORD_FMT0_MASK 0xf
#define S2_TEXCOORD_FMT1_SHIFT 4
#define S2_TEXCOORD_FMT(unit, type) ((type)<<((unit)*4))
#define S2_TEXCOORD_NONE (~0)
#define S2_TEX_COUNT_SHIFT_830 12
#define S2_VERTEX_1_WIDTH_SHIFT_830 0
#define S2_VERTEX_0_WIDTH_SHIFT_830 6
#define S3_TEXCOORD_WRAP_SHORTEST_TCX(unit) (1<<((unit)*4+3))
#define S3_TEXCOORD_WRAP_SHORTEST_TCY(unit) (1<<((unit)*4+2))
#define S3_TEXCOORD_WRAP_SHORTEST_TCZ(unit) (1<<((unit)*4+1))
#define S3_TEXCOORD_PERSPECTIVE_DISABLE(unit) (1<<((unit)*4+0))
#define S4_POINT_WIDTH_SHIFT 23
#define S4_POINT_WIDTH_MASK (0x1ff<<23)
#define S4_LINE_WIDTH_SHIFT 19
#define S4_LINE_WIDTH_ONE (0x2<<19)
#define S4_LINE_WIDTH_MASK (0xf<<19)
#define S4_FLATSHADE_ALPHA (1<<18)
#define S4_FLATSHADE_FOG (1<<17)
#define S4_FLATSHADE_SPECULAR (1<<16)
#define S4_FLATSHADE_COLOR (1<<15)
#define S4_CULLMODE_BOTH (0<<13)
#define S4_CULLMODE_NONE (1<<13)
#define S4_CULLMODE_CW (2<<13)
#define S4_CULLMODE_CCW (3<<13)
#define S4_CULLMODE_MASK (3<<13)
#define S4_VFMT_POINT_WIDTH (1<<12)
#define S4_VFMT_SPEC_FOG (1<<11)
#define S4_VFMT_COLOR (1<<10)
#define S4_VFMT_DEPTH_OFFSET (1<<9)
#define S4_VFMT_XYZ (1<<6)
#define S4_VFMT_XYZW (2<<6)
#define S4_VFMT_XY (3<<6)
#define S4_VFMT_XYW (4<<6)
#define S4_VFMT_XYZW_MASK (7<<6)
#define S4_FORCE_DEFAULT_DIFFUSE (1<<5)
#define S4_FORCE_DEFAULT_SPECULAR (1<<4)
#define S4_LOCAL_DEPTH_OFFSET_ENABLE (1<<3)
#define S4_VFMT_FOG_PARAM (1<<2)
#define S4_SPRITE_POINT_ENABLE (1<<1)
#define S4_LINE_ANTIALIAS_ENABLE (1<<0)
#define S4_VFMT_MASK (S4_VFMT_POINT_WIDTH | \
S4_VFMT_SPEC_FOG | \
S4_VFMT_COLOR | \
S4_VFMT_DEPTH_OFFSET | \
S4_VFMT_XYZW_MASK | \
S4_VFMT_FOG_PARAM)
#define S5_WRITEDISABLE_ALPHA (1<<31)
#define S5_WRITEDISABLE_RED (1<<30)
#define S5_WRITEDISABLE_GREEN (1<<29)
#define S5_WRITEDISABLE_BLUE (1<<28)
#define S5_WRITEDISABLE_MASK (0xf<<28)
#define S5_FORCE_DEFAULT_POINT_SIZE (1<<27)
#define S5_LAST_PIXEL_ENABLE (1<<26)
#define S5_GLOBAL_DEPTH_OFFSET_ENABLE (1<<25)
#define S5_FOG_ENABLE (1<<24)
#define S5_STENCIL_REF_SHIFT 16
#define S5_STENCIL_REF_MASK (0xff<<16)
#define S5_STENCIL_TEST_FUNC_SHIFT 13
#define S5_STENCIL_TEST_FUNC_MASK (0x7<<13)
#define S5_STENCIL_FAIL_SHIFT 10
#define S5_STENCIL_FAIL_MASK (0x7<<10)
#define S5_STENCIL_PASS_Z_FAIL_SHIFT 7
#define S5_STENCIL_PASS_Z_FAIL_MASK (0x7<<7)
#define S5_STENCIL_PASS_Z_PASS_SHIFT 4
#define S5_STENCIL_PASS_Z_PASS_MASK (0x7<<4)
#define S5_STENCIL_WRITE_ENABLE (1<<3)
#define S5_STENCIL_TEST_ENABLE (1<<2)
#define S5_COLOR_DITHER_ENABLE (1<<1)
#define S5_LOGICOP_ENABLE (1<<0)
#define S6_ALPHA_TEST_ENABLE (1<<31)
#define S6_ALPHA_TEST_FUNC_SHIFT 28
#define S6_ALPHA_TEST_FUNC_MASK (0x7<<28)
#define S6_ALPHA_REF_SHIFT 20
#define S6_ALPHA_REF_MASK (0xff<<20)
#define S6_DEPTH_TEST_ENABLE (1<<19)
#define S6_DEPTH_TEST_FUNC_SHIFT 16
#define S6_DEPTH_TEST_FUNC_MASK (0x7<<16)
#define S6_CBUF_BLEND_ENABLE (1<<15)
#define S6_CBUF_BLEND_FUNC_SHIFT 12
#define S6_CBUF_BLEND_FUNC_MASK (0x7<<12)
#define S6_CBUF_SRC_BLEND_FACT_SHIFT 8
#define S6_CBUF_SRC_BLEND_FACT_MASK (0xf<<8)
#define S6_CBUF_DST_BLEND_FACT_SHIFT 4
#define S6_CBUF_DST_BLEND_FACT_MASK (0xf<<4)
#define S6_DEPTH_WRITE_ENABLE (1<<3)
#define S6_COLOR_WRITE_ENABLE (1<<2)
#define S6_TRISTRIP_PV_SHIFT 0
#define S6_TRISTRIP_PV_MASK (0x3<<0)
#define S7_DEPTH_OFFSET_CONST_MASK ~0
/* p143 */
#define _3DSTATE_BUF_INFO_CMD (CMD_3D | (0x1d<<24) | (0x8e<<16) | 1)
/* Dword 1 */
#define BUF_3D_ID_COLOR_BACK (0x3<<24)
#define BUF_3D_ID_DEPTH (0x7<<24)
#define BUF_3D_USE_FENCE (1<<23)
#define BUF_3D_TILED_SURFACE (1<<22)
#define BUF_3D_TILE_WALK_X 0
#define BUF_3D_TILE_WALK_Y (1<<21)
#define BUF_3D_PITCH(x) (((x)/4)<<2)
/* Dword 2 */
#define BUF_3D_ADDR(x) ((x) & ~0x3)
/* Primitive dispatch on 830-945 */
#define _3DPRIMITIVE (CMD_3D | (0x1f << 24))
#define PRIM_INDIRECT (1<<23)
#define PRIM_INLINE (0<<23)
#define PRIM_INDIRECT_SEQUENTIAL (0<<17)
#define PRIM_INDIRECT_ELTS (1<<17)
#define PRIM3D_TRILIST (0x0<<18)
#define PRIM3D_TRISTRIP (0x1<<18)
#define PRIM3D_TRISTRIP_RVRSE (0x2<<18)
#define PRIM3D_TRIFAN (0x3<<18)
#define PRIM3D_POLY (0x4<<18)
#define PRIM3D_LINELIST (0x5<<18)
#define PRIM3D_LINESTRIP (0x6<<18)
#define PRIM3D_RECTLIST (0x7<<18)
#define PRIM3D_POINTLIST (0x8<<18)
#define PRIM3D_DIB (0x9<<18)
#define PRIM3D_MASK (0x1f<<18)
#define XY_SETUP_BLT_CMD (CMD_2D | (0x01 << 22))
#define XY_COLOR_BLT_CMD (CMD_2D | (0x50 << 22))
#define XY_SRC_COPY_BLT_CMD (CMD_2D | (0x53 << 22))
#define XY_TEXT_IMMEDIATE_BLIT_CMD (CMD_2D | (0x31 << 22))
# define XY_TEXT_BYTE_PACKED (1 << 16)
/* BR00 */
#define XY_BLT_WRITE_ALPHA (1 << 21)
#define XY_BLT_WRITE_RGB (1 << 20)
#define XY_SRC_TILED (1 << 15)
#define XY_DST_TILED (1 << 11)
/* BR13 */
#define BR13_8 (0x0 << 24)
#define BR13_565 (0x1 << 24)
#define BR13_8888 (0x3 << 24)
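/* Illustrative sketch (not part of the original header): how the S-state
* words above might be packed and emitted through
* _3DSTATE_LOAD_STATE_IMMEDIATE_1. The helpers and the assumption that bits
* 3:0 encode the number of state dwords minus one are this example's own;
* real emission goes through the driver's batchbuffer code.
*/
#include <stdint.h>
static uint32_t
pack_s4_example(void)
{
/* CCW culling, 1.0-wide lines, flat-shaded color, XYZW plus diffuse color. */
return S4_CULLMODE_CCW | S4_LINE_WIDTH_ONE | S4_FLATSHADE_COLOR |
S4_VFMT_XYZW | S4_VFMT_COLOR;
}
static uint32_t *
emit_s4_s5_example(uint32_t *batch, uint32_t s4, uint32_t s5)
{
*batch++ = _3DSTATE_LOAD_STATE_IMMEDIATE_1 |
I1_LOAD_S(4) | I1_LOAD_S(5) | 1; /* two state dwords follow */
*batch++ = s4;
*batch++ = s5;
return batch;
}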

View File

@ -1,284 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/* Provide additional functionality on top of bufmgr buffers:
* - 2d semantics and blit operations
* - refcounting of buffers for multiple images in a buffer.
* - refcounting of buffer mappings.
* - some logic for moving the buffers to the best memory pools for
* given operations.
*
* Most of this is to make it easier to implement the fixed-layout
* mipmap tree required by intel hardware in the face of GL's
* programming interface where each image can be specified in random
* order and it isn't clear what layout the tree should have until the
* last moment.
*/
#include <sys/ioctl.h>
#include <errno.h>
#include "main/hash.h"
#include "intel_context.h"
#include "intel_regions.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#define FILE_DEBUG_FLAG DEBUG_REGION
static struct intel_region *
intel_region_alloc_internal(struct intel_screen *screen,
GLuint cpp,
GLuint width, GLuint height, GLuint pitch,
uint32_t tiling, drm_intel_bo *buffer)
{
struct intel_region *region;
region = calloc(sizeof(*region), 1);
if (region == NULL)
return region;
region->cpp = cpp;
region->width = width;
region->height = height;
region->pitch = pitch;
region->refcount = 1;
region->bo = buffer;
region->tiling = tiling;
DBG("%s <-- %p\n", __func__, region);
return region;
}
struct intel_region *
intel_region_alloc(struct intel_screen *screen,
uint32_t tiling,
GLuint cpp, GLuint width, GLuint height,
bool expect_accelerated_upload)
{
drm_intel_bo *buffer;
unsigned long flags = 0;
unsigned long aligned_pitch;
struct intel_region *region;
if (expect_accelerated_upload)
flags |= BO_ALLOC_FOR_RENDER;
buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "region",
width, height, cpp,
&tiling, &aligned_pitch, flags);
if (buffer == NULL)
return NULL;
region = intel_region_alloc_internal(screen, cpp, width, height,
aligned_pitch, tiling, buffer);
if (region == NULL) {
drm_intel_bo_unreference(buffer);
return NULL;
}
return region;
}
bool
intel_region_flink(struct intel_region *region, uint32_t *name)
{
if (region->name == 0) {
if (drm_intel_bo_flink(region->bo, &region->name))
return false;
}
*name = region->name;
return true;
}
struct intel_region *
intel_region_alloc_for_handle(struct intel_screen *screen,
GLuint cpp,
GLuint width, GLuint height, GLuint pitch,
GLuint handle, const char *name)
{
struct intel_region *region;
drm_intel_bo *buffer;
int ret;
uint32_t bit_6_swizzle, tiling;
buffer = drm_intel_bo_gem_create_from_name(screen->bufmgr, name, handle);
if (buffer == NULL)
return NULL;
ret = drm_intel_bo_get_tiling(buffer, &tiling, &bit_6_swizzle);
if (ret != 0) {
fprintf(stderr, "Couldn't get tiling of buffer %d (%s): %s\n",
handle, name, strerror(-ret));
drm_intel_bo_unreference(buffer);
return NULL;
}
region = intel_region_alloc_internal(screen, cpp,
width, height, pitch, tiling, buffer);
if (region == NULL) {
drm_intel_bo_unreference(buffer);
return NULL;
}
region->name = handle;
return region;
}
struct intel_region *
intel_region_alloc_for_fd(struct intel_screen *screen,
GLuint cpp,
GLuint width, GLuint height, GLuint pitch,
GLuint size,
int fd, const char *name)
{
struct intel_region *region;
drm_intel_bo *buffer;
int ret;
uint32_t bit_6_swizzle, tiling;
buffer = drm_intel_bo_gem_create_from_prime(screen->bufmgr, fd, size);
if (buffer == NULL)
return NULL;
ret = drm_intel_bo_get_tiling(buffer, &tiling, &bit_6_swizzle);
if (ret != 0) {
fprintf(stderr, "Couldn't get tiling of buffer (%s): %s\n",
name, strerror(-ret));
drm_intel_bo_unreference(buffer);
return NULL;
}
region = intel_region_alloc_internal(screen, cpp,
width, height, pitch, tiling, buffer);
if (region == NULL) {
drm_intel_bo_unreference(buffer);
return NULL;
}
return region;
}
void
intel_region_reference(struct intel_region **dst, struct intel_region *src)
{
DBG("%s: %p(%d) -> %p(%d)\n", __func__,
*dst, *dst ? (*dst)->refcount : 0, src, src ? src->refcount : 0);
if (src != *dst) {
if (*dst)
intel_region_release(dst);
if (src)
src->refcount++;
*dst = src;
}
}
void
intel_region_release(struct intel_region **region_handle)
{
struct intel_region *region = *region_handle;
if (region == NULL) {
DBG("%s NULL\n", __func__);
return;
}
DBG("%s %p %d\n", __func__, region, region->refcount - 1);
assert(region->refcount > 0);
region->refcount--;
if (region->refcount == 0) {
drm_intel_bo_unreference(region->bo);
free(region);
}
*region_handle = NULL;
}
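/* Typical refcounting usage (illustrative, not from the original source):
* region pointers are only copied via intel_region_reference() and dropped
* via intel_region_release(), never assigned directly:
*
* struct intel_region *r = intel_region_alloc(screen, I915_TILING_X,
* 4, 512, 512, false);
* struct intel_region *copy = NULL;
* intel_region_reference(&copy, r); (refcount 1 -> 2)
* intel_region_release(&r); (refcount 2 -> 1, r = NULL)
* intel_region_release(&copy); (refcount 0, bo unreferenced, copy = NULL)
*/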
/**
* This function computes masks that may be used to select the bits of the X
* and Y coordinates that indicate the offset within a tile. If the region is
* untiled, the masks are set to 0.
*/
void
intel_region_get_tile_masks(struct intel_region *region,
uint32_t *mask_x, uint32_t *mask_y)
{
int cpp = region->cpp;
uint32_t tiling = region->tiling;
switch (tiling) {
default:
assert(false);
case I915_TILING_NONE:
*mask_x = *mask_y = 0;
break;
case I915_TILING_X:
*mask_x = 512 / cpp - 1;
*mask_y = 7;
break;
case I915_TILING_Y:
*mask_x = 128 / cpp - 1;
*mask_y = 31;
break;
}
}
/**
* Compute the offset (in bytes) from the start of the region to the given x
* and y coordinate. For tiled regions, caller must ensure that x and y are
* multiples of the tile size.
*/
uint32_t
intel_region_get_aligned_offset(struct intel_region *region, uint32_t x,
uint32_t y)
{
int cpp = region->cpp;
uint32_t pitch = region->pitch;
uint32_t tiling = region->tiling;
switch (tiling) {
default:
assert(false);
case I915_TILING_NONE:
return y * pitch + x * cpp;
case I915_TILING_X:
assert((x % (512 / cpp)) == 0);
assert((y % 8) == 0);
return y * pitch + x / (512 / cpp) * 4096;
case I915_TILING_Y:
assert((x % (128 / cpp)) == 0);
assert((y % 32) == 0);
return y * pitch + x / (128 / cpp) * 4096;
}
}
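/* Worked example (illustrative numbers): for an X-tiled region with cpp = 4
* and pitch = 4096 bytes, one tile covers 512 bytes (128 pixels) in x and 8
* rows in y, so mask_x = 127 and mask_y = 7. For the tile-aligned coordinate
* (x = 256, y = 16):
*
* offset = y * pitch + x / (512 / cpp) * 4096
* = 16 * 4096 + (256 / 128) * 4096
* = 65536 + 8192 = 73728 bytes
*/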

View File

@ -1,151 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_REGIONS_H
#define INTEL_REGIONS_H
/** @file intel_regions.h
*
* Structure definitions and prototypes for intel_region handling,
* which is the basic structure for rectangular collections of pixels
* stored in a drm_intel_bo.
*/
#include <stdbool.h>
#include <xf86drm.h>
#include "main/mtypes.h"
#include "intel_bufmgr.h"
struct intel_context;
struct intel_screen;
struct intel_buffer_object;
/**
* A layer on top of the bufmgr buffers that adds a few useful things:
*
* - Refcounting for local buffer references.
* - Refcounting for buffer maps
* - Buffer dimensions - pitch and height.
* - Blitter commands for copying 2D regions between buffers. (really???)
*/
struct intel_region
{
drm_intel_bo *bo; /**< buffer manager's buffer */
GLuint refcount; /**< Reference count for region */
GLuint cpp; /**< bytes per pixel */
GLuint width; /**< in pixels */
GLuint height; /**< in pixels */
GLuint pitch; /**< in bytes */
uint32_t tiling; /**< Which tiling mode the region is in */
uint32_t name; /**< Global name for the bo */
};
/* Allocate a refcounted region. Pointers to regions should only be
* copied by calling intel_reference_region().
*/
struct intel_region *intel_region_alloc(struct intel_screen *screen,
uint32_t tiling,
GLuint cpp, GLuint width,
GLuint height,
bool expect_accelerated_upload);
struct intel_region *
intel_region_alloc_for_handle(struct intel_screen *screen,
GLuint cpp,
GLuint width, GLuint height, GLuint pitch,
unsigned int handle, const char *name);
struct intel_region *
intel_region_alloc_for_fd(struct intel_screen *screen,
GLuint cpp,
GLuint width, GLuint height, GLuint pitch,
GLuint size, int fd, const char *name);
bool
intel_region_flink(struct intel_region *region, uint32_t *name);
void intel_region_reference(struct intel_region **dst,
struct intel_region *src);
void intel_region_release(struct intel_region **ib);
void intel_recreate_static_regions(struct intel_context *intel);
void
intel_region_get_tile_masks(struct intel_region *region,
uint32_t *mask_x, uint32_t *mask_y);
uint32_t
intel_region_get_aligned_offset(struct intel_region *region, uint32_t x,
uint32_t y);
/**
* Used with images created with image_from_names
* to help support planar images.
*/
struct intel_image_format {
int fourcc;
int components;
int nplanes;
struct {
int buffer_index;
int width_shift;
int height_shift;
uint32_t dri_format;
int cpp;
} planes[3];
};
struct __DRIimageRec {
struct intel_region *region;
GLenum internal_format;
uint32_t dri_format;
GLuint format;
uint32_t offset;
/*
* Need to save these here between calls to
* image_from_names and calls to image_from_planar.
*/
uint32_t strides[3];
uint32_t offsets[3];
struct intel_image_format *planar_format;
/* particular miptree level */
GLuint width;
GLuint height;
GLuint tile_x;
GLuint tile_y;
void *data;
};
#endif

View File

@ -1,283 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Render unclipped vertex buffers by emitting vertices directly to
* dma buffers. Use strip/fan hardware acceleration where possible.
*
*/
#include "main/glheader.h"
#include "main/context.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/enums.h"
#include "math/m_xform.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "tnl/t_pipeline.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_tris.h"
#include "intel_batchbuffer.h"
#include "intel_reg.h"
/*
* Render unclipped vertex buffers by emitting vertices directly to
* dma buffers. Use strip/fan hardware primitives where possible.
* Try to simulate missing primitives with indexed vertices.
*/
#define HAVE_POINTS 1
#define HAVE_LINES 1
#define HAVE_LINE_STRIPS 1
#define HAVE_TRIANGLES 1
#define HAVE_TRI_STRIPS 1
#define HAVE_TRI_FANS 1
#define HAVE_POLYGONS 1
#define HAVE_QUADS 0
#define HAVE_QUAD_STRIPS 0
#define HAVE_ELTS 0
static const uint32_t hw_prim[GL_POLYGON + 1] = {
[GL_POINTS] = PRIM3D_POINTLIST,
[GL_LINES ] = PRIM3D_LINELIST,
[GL_LINE_LOOP] = PRIM3D_LINESTRIP,
[GL_LINE_STRIP] = PRIM3D_LINESTRIP,
[GL_TRIANGLES] = PRIM3D_TRILIST,
[GL_TRIANGLE_STRIP] = PRIM3D_TRISTRIP,
[GL_TRIANGLE_FAN] = PRIM3D_TRIFAN,
[GL_QUADS] = 0,
[GL_QUAD_STRIP] = 0,
[GL_POLYGON] = PRIM3D_POLY,
};
static const GLenum reduced_prim[GL_POLYGON + 1] = {
[GL_POINTS] = GL_POINTS,
[GL_LINES] = GL_LINES,
[GL_LINE_LOOP] = GL_LINES,
[GL_LINE_STRIP] = GL_LINES,
[GL_TRIANGLES] = GL_TRIANGLES,
[GL_TRIANGLE_STRIP] = GL_TRIANGLES,
[GL_TRIANGLE_FAN] = GL_TRIANGLES,
[GL_QUADS] = GL_TRIANGLES,
[GL_QUAD_STRIP] = GL_TRIANGLES,
[GL_POLYGON] = GL_TRIANGLES,
};
static const int scale_prim[GL_POLYGON + 1] = {
[GL_POINTS] = 1,
[GL_LINES] = 1,
[GL_LINE_LOOP] = 2,
[GL_LINE_STRIP] = 2,
[GL_TRIANGLES] = 1,
[GL_TRIANGLE_STRIP] = 3,
[GL_TRIANGLE_FAN] = 3,
[GL_QUADS] = 0, /* fallback case */
[GL_QUAD_STRIP] = 0, /* fallback case */
[GL_POLYGON] = 3,
};
static void
intelDmaPrimitive(struct intel_context *intel, GLenum prim)
{
if (0)
fprintf(stderr, "%s %s\n", __func__, _mesa_enum_to_string(prim));
INTEL_FIREVERTICES(intel);
intel->vtbl.reduced_primitive_state(intel, reduced_prim[prim]);
intel_set_prim(intel, hw_prim[prim]);
}
#define INTEL_NO_VBO_STATE_RESERVED 1500
static inline GLuint intel_get_vb_max(struct intel_context *intel)
{
GLuint ret;
if (intel->intelScreen->no_vbo) {
ret = intel->batch.bo->size - INTEL_NO_VBO_STATE_RESERVED;
} else
ret = INTEL_VB_SIZE;
ret /= (intel->vertex_size * 4);
return ret;
}
static inline GLuint intel_get_current_max(struct intel_context *intel)
{
GLuint ret;
if (intel->intelScreen->no_vbo) {
ret = intel_batchbuffer_space(intel);
ret = ret <= INTEL_NO_VBO_STATE_RESERVED ? 0 : ret - INTEL_NO_VBO_STATE_RESERVED;
} else
ret = (INTEL_VB_SIZE - intel->prim.current_offset);
return ret / (intel->vertex_size * 4);
}
#define LOCAL_VARS struct intel_context *intel = intel_context(ctx)
#define INIT( prim ) \
do { \
intelDmaPrimitive( intel, prim ); \
} while (0)
#define FLUSH() INTEL_FIREVERTICES(intel)
#define GET_SUBSEQUENT_VB_MAX_VERTS() intel_get_vb_max(intel)
#define GET_CURRENT_VB_MAX_VERTS() intel_get_current_max(intel)
#define ALLOC_VERTS(nr) intel_get_prim_space(intel, nr)
#define EMIT_VERTS( ctx, j, nr, buf ) \
_tnl_emit_vertices_to_buffer(ctx, j, (j)+(nr), buf )
#define TAG(x) intel_##x
#include "tnl_dd/t_dd_dmatmp.h"
/**********************************************************************/
/* Render pipeline stage */
/**********************************************************************/
/* Heuristic to choose between the two render paths:
*/
static bool
choose_render(struct intel_context *intel, struct vertex_buffer *VB)
{
int vertsz = intel->vertex_size;
int cost_render = 0;
int cost_fallback = 0;
int nr_prims = 0;
int nr_rprims = 0;
int nr_rverts = 0;
int rprim = intel->reduced_primitive;
int i = 0;
for (i = 0; i < VB->PrimitiveCount; i++) {
GLuint prim = VB->Primitive[i].mode;
GLuint length = VB->Primitive[i].count;
if (!length)
continue;
nr_prims++;
nr_rverts += length * scale_prim[prim & PRIM_MODE_MASK];
if (reduced_prim[prim & PRIM_MODE_MASK] != rprim) {
nr_rprims++;
rprim = reduced_prim[prim & PRIM_MODE_MASK];
}
}
/* One point for each generated primitive:
*/
cost_render = nr_prims;
cost_fallback = nr_rprims;
/* One point for every 1024 dwords (4k) of dma:
*/
cost_render += (vertsz * i) / 1024;
cost_fallback += (vertsz * nr_rverts) / 1024;
if (0)
fprintf(stderr, "cost render: %d fallback: %d\n",
cost_render, cost_fallback);
if (cost_render > cost_fallback)
return false;
return true;
}
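/* Worked example (illustrative numbers): with vertex_size = 8 dwords and ten
* GL_TRIANGLE_STRIP primitives of 100 vertices each, all sharing one reduced
* primitive:
*
* nr_prims = 10, nr_rprims <= 1, nr_rverts = 10 * 100 * 3 = 3000
* cost_render = 10 + (8 * 10) / 1024 = 10
* cost_fallback = 1 + (8 * 3000) / 1024 = 24
*
* cost_render <= cost_fallback, so the hardware render path is chosen.
*/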
static GLboolean
intel_run_render(struct gl_context * ctx, struct tnl_pipeline_stage *stage)
{
struct intel_context *intel = intel_context(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
GLuint i;
intel->vtbl.render_prevalidate( intel );
/* Don't handle clipping or indexed vertices.
*/
if (intel->RenderIndex != 0 ||
!intel_validate_render(ctx, VB) || !choose_render(intel, VB)) {
return true;
}
tnl->clipspace.new_inputs |= VERT_BIT_POS;
tnl->Driver.Render.Start(ctx);
for (i = 0; i < VB->PrimitiveCount; i++) {
GLuint prim = _tnl_translate_prim(&VB->Primitive[i]);
GLuint start = VB->Primitive[i].start;
GLuint length = VB->Primitive[i].count;
if (!length)
continue;
intel_render_tab_verts[prim & PRIM_MODE_MASK] (ctx, start,
length, prim);
}
tnl->Driver.Render.Finish(ctx);
INTEL_FIREVERTICES(intel);
return false; /* finished the pipe */
}
static const struct tnl_pipeline_stage _intel_render_stage = {
"intel render",
NULL,
NULL,
NULL,
NULL,
intel_run_render /* run */
};
const struct tnl_pipeline_stage *intel_pipeline[] = {
&_tnl_vertex_transform_stage,
&_tnl_normal_transform_stage,
&_tnl_lighting_stage,
&_tnl_fog_coordinate_stage,
&_tnl_texgen_stage,
&_tnl_texture_transform_stage,
&_tnl_point_attenuation_stage,
&_tnl_vertex_program_stage,
#if 1
&_intel_render_stage, /* ADD: unclipped rastersetup-to-dma */
#endif
&_tnl_render_stage,
0,
};

File diff suppressed because it is too large

View File

@ -1,173 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _INTEL_INIT_H_
#define _INTEL_INIT_H_
#include <stdbool.h>
#include <sys/time.h>
#include "dri_util.h"
#include "intel_bufmgr.h"
#include "drm-uapi/i915_drm.h"
#include "util/xmlconfig.h"
struct intel_screen
{
int deviceID;
int gen;
__DRIscreen *driScrnPriv;
bool no_hw;
bool no_vbo;
dri_bufmgr *bufmgr;
/**
* Configuration cache with default values for all contexts
*/
driOptionCache optionCache;
};
/* These defines are to ensure that i915_dri's symbols don't conflict with
* i965's when linked together.
*/
#define intel_region_alloc old_intel_region_alloc
#define intel_region_alloc_for_fd old_intel_region_alloc_for_fd
#define intel_region_alloc_for_handle old_intel_region_alloc_for_handle
#define intel_region_flink old_intel_region_flink
#define intel_region_get_aligned_offset old_intel_region_get_aligned_offset
#define intel_region_get_tile_masks old_intel_region_get_tile_masks
#define intel_region_reference old_intel_region_reference
#define intel_region_release old_intel_region_release
#define intel_bufferobj_buffer old_intel_bufferobj_buffer
#define intel_bufferobj_source old_intel_bufferobj_source
#define intelInitBufferObjectFuncs old_intelInitBufferObjectFuncs
#define intel_upload_data old_intel_upload_data
#define intel_upload_finish old_intel_upload_finish
#define intel_batchbuffer_data old_intel_batchbuffer_data
#define intel_batchbuffer_emit_mi_flush old_intel_batchbuffer_emit_mi_flush
#define intel_batchbuffer_emit_reloc old_intel_batchbuffer_emit_reloc
#define intel_batchbuffer_emit_reloc_fenced old_intel_batchbuffer_emit_reloc_fenced
#define _intel_batchbuffer_flush old__intel_batchbuffer_flush
#define intel_batchbuffer_free old_intel_batchbuffer_free
#define intel_batchbuffer_init old_intel_batchbuffer_init
#define intelInitClearFuncs old_intelInitClearFuncs
#define intelInitExtensions old_intelInitExtensions
#define intel_miptree_copy_teximage old_intel_miptree_copy_teximage
#define intel_miptree_create old_intel_miptree_create
#define intel_miptree_create_for_bo old_intel_miptree_create_for_bo
#define intel_miptree_create_for_dri2_buffer old_intel_miptree_create_for_dri2_buffer
#define intel_miptree_create_for_renderbuffer old_intel_miptree_create_for_renderbuffer
#define intel_miptree_create_layout old_intel_miptree_create_layout
#define intel_miptree_get_dimensions_for_image old_intel_miptree_get_dimensions_for_image
#define intel_miptree_get_image_offset old_intel_miptree_get_image_offset
#define intel_miptree_get_tile_offsets old_intel_miptree_get_tile_offsets
#define intel_miptree_map old_intel_miptree_map
#define intel_miptree_map_raw old_intel_miptree_map_raw
#define intel_miptree_match_image old_intel_miptree_match_image
#define intel_miptree_reference old_intel_miptree_reference
#define intel_miptree_release old_intel_miptree_release
#define intel_miptree_set_image_offset old_intel_miptree_set_image_offset
#define intel_miptree_set_level_info old_intel_miptree_set_level_info
#define intel_miptree_unmap old_intel_miptree_unmap
#define intel_miptree_unmap_raw old_intel_miptree_unmap_raw
#define i945_miptree_layout_2d old_i945_miptree_layout_2d
#define intel_get_texture_alignment_unit old_intel_get_texture_alignment_unit
#define intelInitTextureImageFuncs old_intelInitTextureImageFuncs
#define intel_miptree_create_for_teximage old_intel_miptree_create_for_teximage
#define intelSetTexBuffer old_intelSetTexBuffer
#define intelSetTexBuffer2 old_intelSetTexBuffer2
#define intelInitTextureSubImageFuncs old_intelInitTextureSubImageFuncs
#define intelInitTextureCopyImageFuncs old_intelInitTextureCopyImageFuncs
#define intel_finalize_mipmap_tree old_intel_finalize_mipmap_tree
#define intelInitTextureFuncs old_intelInitTextureFuncs
#define intel_check_blit_fragment_ops old_intel_check_blit_fragment_ops
#define intelInitPixelFuncs old_intelInitPixelFuncs
#define intelBitmap old_intelBitmap
#define intelCopyPixels old_intelCopyPixels
#define intelDrawPixels old_intelDrawPixels
#define intelReadPixels old_intelReadPixels
#define intel_check_front_buffer_rendering old_intel_check_front_buffer_rendering
#define intelInitBufferFuncs old_intelInitBufferFuncs
#define intelClearWithBlit old_intelClearWithBlit
#define intelEmitImmediateColorExpandBlit old_intelEmitImmediateColorExpandBlit
#define intel_emit_linear_blit old_intel_emit_linear_blit
#define intel_miptree_blit old_intel_miptree_blit
#define i945_miptree_layout old_i945_miptree_layout
#define intel_init_texture_formats old_intel_init_texture_formats
#define intelCalcViewport old_intelCalcViewport
#define INTEL_DEBUG old_INTEL_DEBUG
#define intelDestroyContext old_intelDestroyContext
#define intelFinish old_intelFinish
#define _intel_flush old__intel_flush
#define intel_flush_rendering_to_batch old_intel_flush_rendering_to_batch
#define intelInitContext old_intelInitContext
#define intelInitDriverFunctions old_intelInitDriverFunctions
#define intelMakeCurrent old_intelMakeCurrent
#define intel_prepare_render old_intel_prepare_render
#define intelUnbindContext old_intelUnbindContext
#define intel_update_renderbuffers old_intel_update_renderbuffers
#define aub_dump_bmp old_aub_dump_bmp
#define get_time old_get_time
#define intel_translate_blend_factor old_intel_translate_blend_factor
#define intel_translate_compare_func old_intel_translate_compare_func
#define intel_translate_shadow_compare_func old_intel_translate_shadow_compare_func
#define intel_translate_stencil_op old_intel_translate_stencil_op
#define intel_init_syncobj_functions old_intel_init_syncobj_functions
#define intelChooseRenderState old_intelChooseRenderState
#define intelFallback old_intelFallback
#define intel_finish_vb old_intel_finish_vb
#define intel_flush_prim old_intel_flush_prim
#define intel_get_prim_space old_intel_get_prim_space
#define intelInitTriFuncs old_intelInitTriFuncs
#define intel_set_prim old_intel_set_prim
#define intel_create_private_renderbuffer old_intel_create_private_renderbuffer
#define intel_create_renderbuffer old_intel_create_renderbuffer
#define intel_fbo_init old_intel_fbo_init
#define intel_get_rb_region old_intel_get_rb_region
#define intel_renderbuffer_set_draw_offset old_intel_renderbuffer_set_draw_offset
#define intel_miptree_create_for_image_buffer old_intel_miptree_create_for_image_buffer
#define intelFenceExtension old_intelFenceExtension
extern void intelDestroyContext(__DRIcontext * driContextPriv);
extern GLboolean intelUnbindContext(__DRIcontext * driContextPriv);
const __DRIextension **__driDriverGetExtensions_i830(void);
const __DRIextension **__driDriverGetExtensions_i915(void);
extern const __DRI2fenceExtension intelFenceExtension;
extern GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
__DRIdrawable * driDrawPriv,
__DRIdrawable * driReadPriv);
double get_time(void);
void aub_dump_bmp(struct gl_context *ctx);
#endif

View File

@ -1,153 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/glheader.h"
#include "main/context.h"
#include "main/macros.h"
#include "main/enums.h"
#include "main/dd.h"
#include "intel_screen.h"
#include "intel_context.h"
int
intel_translate_shadow_compare_func(GLenum func)
{
switch (func) {
case GL_NEVER:
return COMPAREFUNC_ALWAYS;
case GL_LESS:
return COMPAREFUNC_LEQUAL;
case GL_LEQUAL:
return COMPAREFUNC_LESS;
case GL_GREATER:
return COMPAREFUNC_GEQUAL;
case GL_GEQUAL:
return COMPAREFUNC_GREATER;
case GL_NOTEQUAL:
return COMPAREFUNC_EQUAL;
case GL_EQUAL:
return COMPAREFUNC_NOTEQUAL;
case GL_ALWAYS:
return COMPAREFUNC_NEVER;
}
fprintf(stderr, "Unknown value in %s: %x\n", __func__, func);
return COMPAREFUNC_NEVER;
}
int
intel_translate_compare_func(GLenum func)
{
switch (func) {
case GL_NEVER:
return COMPAREFUNC_NEVER;
case GL_LESS:
return COMPAREFUNC_LESS;
case GL_LEQUAL:
return COMPAREFUNC_LEQUAL;
case GL_GREATER:
return COMPAREFUNC_GREATER;
case GL_GEQUAL:
return COMPAREFUNC_GEQUAL;
case GL_NOTEQUAL:
return COMPAREFUNC_NOTEQUAL;
case GL_EQUAL:
return COMPAREFUNC_EQUAL;
case GL_ALWAYS:
return COMPAREFUNC_ALWAYS;
}
fprintf(stderr, "Unknown value in %s: %x\n", __func__, func);
return COMPAREFUNC_ALWAYS;
}
int
intel_translate_stencil_op(GLenum op)
{
switch (op) {
case GL_KEEP:
return STENCILOP_KEEP;
case GL_ZERO:
return STENCILOP_ZERO;
case GL_REPLACE:
return STENCILOP_REPLACE;
case GL_INCR:
return STENCILOP_INCRSAT;
case GL_DECR:
return STENCILOP_DECRSAT;
case GL_INCR_WRAP:
return STENCILOP_INCR;
case GL_DECR_WRAP:
return STENCILOP_DECR;
case GL_INVERT:
return STENCILOP_INVERT;
default:
return STENCILOP_ZERO;
}
}
int
intel_translate_blend_factor(GLenum factor)
{
switch (factor) {
case GL_ZERO:
return BLENDFACT_ZERO;
case GL_SRC_ALPHA:
return BLENDFACT_SRC_ALPHA;
case GL_ONE:
return BLENDFACT_ONE;
case GL_SRC_COLOR:
return BLENDFACT_SRC_COLR;
case GL_ONE_MINUS_SRC_COLOR:
return BLENDFACT_INV_SRC_COLR;
case GL_DST_COLOR:
return BLENDFACT_DST_COLR;
case GL_ONE_MINUS_DST_COLOR:
return BLENDFACT_INV_DST_COLR;
case GL_ONE_MINUS_SRC_ALPHA:
return BLENDFACT_INV_SRC_ALPHA;
case GL_DST_ALPHA:
return BLENDFACT_DST_ALPHA;
case GL_ONE_MINUS_DST_ALPHA:
return BLENDFACT_INV_DST_ALPHA;
case GL_SRC_ALPHA_SATURATE:
return BLENDFACT_SRC_ALPHA_SATURATE;
case GL_CONSTANT_COLOR:
return BLENDFACT_CONST_COLOR;
case GL_ONE_MINUS_CONSTANT_COLOR:
return BLENDFACT_INV_CONST_COLOR;
case GL_CONSTANT_ALPHA:
return BLENDFACT_CONST_ALPHA;
case GL_ONE_MINUS_CONSTANT_ALPHA:
return BLENDFACT_INV_CONST_ALPHA;
}
fprintf(stderr, "Unknown value in %s: %x\n", __func__, factor);
return BLENDFACT_ZERO;
}

View File

@ -1,289 +0,0 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
/**
* \file
* \brief Support for GL_ARB_sync and EGL_KHR_fence_sync.
*
* GL_ARB_sync is implemented by flushing the current batchbuffer and keeping a
* reference on it. We can then check for completion or wait for completion
* using the normal buffer object mechanisms. This does mean that if an
* application is using many sync objects, it will emit small batchbuffers
* which may end up being a significant overhead. In other tests of removing
* gratuitous batchbuffer syncs in Mesa, it hasn't appeared to be a significant
* performance bottleneck, though.
*/
#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_reg.h"
struct intel_fence {
struct intel_context *intel;
/** The fence waits for completion of this batch. */
drm_intel_bo *batch_bo;
mtx_t mutex;
bool signalled;
};
struct intel_gl_sync_object {
struct gl_sync_object Base;
struct intel_fence fence;
};
static void
intel_fence_finish(struct intel_fence *fence)
{
if (fence->batch_bo)
drm_intel_bo_unreference(fence->batch_bo);
}
static void
intel_fence_insert(struct intel_context *intel, struct intel_fence *fence)
{
assert(!fence->batch_bo);
assert(!fence->signalled);
intel_batchbuffer_emit_mi_flush(intel);
fence->batch_bo = intel->batch.bo;
drm_intel_bo_reference(fence->batch_bo);
intel_batchbuffer_flush(intel);
}
static bool
intel_fence_has_completed_locked(struct intel_fence *fence)
{
if (fence->signalled)
return true;
if (fence->batch_bo && !drm_intel_bo_busy(fence->batch_bo)) {
drm_intel_bo_unreference(fence->batch_bo);
fence->batch_bo = NULL;
fence->signalled = true;
return true;
}
return false;
}
static bool
intel_fence_has_completed(struct intel_fence *fence)
{
bool ret;
mtx_lock(&fence->mutex);
ret = intel_fence_has_completed_locked(fence);
mtx_unlock(&fence->mutex);
return ret;
}
static bool
intel_fence_client_wait_locked(struct intel_context *intel, struct intel_fence *fence,
uint64_t timeout)
{
if (fence->signalled)
return true;
assert(fence->batch_bo);
/* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
* immediately for timeouts <= 0. The best we can do is to clamp the
* timeout to INT64_MAX. This limits the maximum timeout from 584 years to
* 292 years - likely not a big deal.
*/
if (timeout > INT64_MAX)
timeout = INT64_MAX;
if (drm_intel_gem_bo_wait(fence->batch_bo, timeout) != 0)
return false;
fence->signalled = true;
drm_intel_bo_unreference(fence->batch_bo);
fence->batch_bo = NULL;
return true;
}
/**
* Return true if the fence signalled within the timeout or had already signalled.
* (This matches the behavior expected from __DRI2fence::client_wait_sync).
*/
static bool
intel_fence_client_wait(struct intel_context *intel, struct intel_fence *fence,
uint64_t timeout)
{
bool ret;
mtx_lock(&fence->mutex);
ret = intel_fence_client_wait_locked(intel, fence, timeout);
mtx_unlock(&fence->mutex);
return ret;
}
static void
intel_fence_server_wait(struct intel_context *intel, struct intel_fence *fence)
{
/* We have nothing to do for WaitSync. Our GL command stream is sequential,
* so given that the sync object has already flushed the batchbuffer, any
* batchbuffers coming after this waitsync will naturally not occur until
* the previous one is done.
*/
}
static struct gl_sync_object *
intel_gl_new_sync_object(struct gl_context *ctx)
{
struct intel_gl_sync_object *sync;
sync = calloc(1, sizeof(*sync));
if (!sync)
return NULL;
return &sync->Base;
}
static void
intel_gl_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
{
struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
intel_fence_finish(&sync->fence);
free(sync);
}
static void
intel_gl_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
GLenum condition, GLbitfield flags)
{
struct intel_context *intel = intel_context(ctx);
struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
intel_fence_insert(intel, &sync->fence);
}
static void
intel_gl_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
GLbitfield flags, GLuint64 timeout)
{
struct intel_context *intel = intel_context(ctx);
struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
if (intel_fence_client_wait(intel, &sync->fence, timeout))
s->StatusFlag = 1;
}
static void
intel_gl_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
GLbitfield flags, GLuint64 timeout)
{
struct intel_context *intel = intel_context(ctx);
struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
intel_fence_server_wait(intel, &sync->fence);
}
static void
intel_gl_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
{
struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
if (intel_fence_has_completed(&sync->fence))
s->StatusFlag = 1;
}
void
intel_init_syncobj_functions(struct dd_function_table *functions)
{
functions->NewSyncObject = intel_gl_new_sync_object;
functions->DeleteSyncObject = intel_gl_delete_sync_object;
functions->FenceSync = intel_gl_fence_sync;
functions->CheckSync = intel_gl_check_sync;
functions->ClientWaitSync = intel_gl_client_wait_sync;
functions->ServerWaitSync = intel_gl_server_wait_sync;
}
static void *
intel_dri_create_fence(__DRIcontext *ctx)
{
struct intel_context *intel = ctx->driverPrivate;
struct intel_fence *fence;
fence = calloc(1, sizeof(*fence));
if (!fence)
return NULL;
mtx_init(&fence->mutex, mtx_plain);
fence->intel = intel;
intel_fence_insert(intel, fence);
return fence;
}
static void
intel_dri_destroy_fence(__DRIscreen *screen, void *driver_fence)
{
struct intel_fence *fence = driver_fence;
intel_fence_finish(fence);
free(fence);
}
static GLboolean
intel_dri_client_wait_sync(__DRIcontext *ctx, void *driver_fence, unsigned flags,
uint64_t timeout)
{
struct intel_fence *fence = driver_fence;
return intel_fence_client_wait(fence->intel, fence, timeout);
}
static void
intel_dri_server_wait_sync(__DRIcontext *ctx, void *driver_fence, unsigned flags)
{
struct intel_fence *fence = driver_fence;
/* We might be called here with a NULL fence as a result of WaitSyncKHR
* on an EGL_KHR_reusable_sync fence. Nothing to do here in that case.
*/
if (!fence)
return;
intel_fence_server_wait(fence->intel, fence);
}
const __DRI2fenceExtension intelFenceExtension = {
.base = { __DRI2_FENCE, 1 },
.create_fence = intel_dri_create_fence,
.destroy_fence = intel_dri_destroy_fence,
.client_wait_sync = intel_dri_client_wait_sync,
.server_wait_sync = intel_dri_server_wait_sync,
.get_fence_from_cl_event = NULL,
};
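/* For context, a sketch of the client-side GL_ARB_sync pattern the hooks
* above service. This is standard GL API usage, not driver code; using the
* <epoxy/gl.h> loader is an assumption of this example.
*/
#include <stdio.h>
#include <epoxy/gl.h>
static void
wait_for_gpu_example(void)
{
/* Lands in intel_gl_fence_sync(): flushes the batch, holds a ref on its bo. */
GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
/* Lands in intel_gl_client_wait_sync(), which boils down to
* drm_intel_gem_bo_wait() on that bo; the timeout is in nanoseconds.
*/
GLenum status = glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT,
1000000000ull /* 1 second */);
if (status == GL_TIMEOUT_EXPIRED)
fprintf(stderr, "GPU still busy after 1 second\n");
glDeleteSync(fence);
}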

View File

@ -1,179 +0,0 @@
#include "swrast/swrast.h"
#include "main/renderbuffer.h"
#include "main/texobj.h"
#include "main/teximage.h"
#include "main/mipmap.h"
#include "drivers/common/meta.h"
#include "util/u_memory.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#define FILE_DEBUG_FLAG DEBUG_TEXTURE
static struct gl_texture_image *
intelNewTextureImage(struct gl_context * ctx)
{
DBG("%s\n", __func__);
(void) ctx;
return (struct gl_texture_image *) CALLOC_STRUCT(intel_texture_image);
}
static void
intelDeleteTextureImage(struct gl_context * ctx, struct gl_texture_image *img)
{
/* nothing special (yet) for intel_texture_image */
_mesa_delete_texture_image(ctx, img);
}
static struct gl_texture_object *
intelNewTextureObject(struct gl_context * ctx, GLuint name, GLenum target)
{
struct intel_texture_object *obj = CALLOC_STRUCT(intel_texture_object);
(void) ctx;
DBG("%s\n", __func__);
if (obj == NULL)
return NULL;
_mesa_initialize_texture_object(ctx, &obj->base, name, target);
obj->needs_validate = true;
return &obj->base;
}
static void
intelDeleteTextureObject(struct gl_context *ctx,
struct gl_texture_object *texObj)
{
struct intel_texture_object *intelObj = intel_texture_object(texObj);
intel_miptree_release(&intelObj->mt);
_mesa_delete_texture_object(ctx, texObj);
}
static GLboolean
intel_alloc_texture_image_buffer(struct gl_context *ctx,
struct gl_texture_image *image)
{
struct intel_context *intel = intel_context(ctx);
struct intel_texture_image *intel_image = intel_texture_image(image);
struct gl_texture_object *texobj = image->TexObject;
struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
assert(image->Border == 0);
/* Because the driver uses AllocTextureImageBuffer() internally, it may end
* up mismatched with FreeTextureImageBuffer(), but that is safe to call
* multiple times.
*/
ctx->Driver.FreeTextureImageBuffer(ctx, image);
if (!_swrast_init_texture_image(image))
return false;
if (intel_texobj->mt &&
intel_miptree_match_image(intel_texobj->mt, image)) {
intel_miptree_reference(&intel_image->mt, intel_texobj->mt);
DBG("%s: alloc obj %p level %d %dx%dx%d using object's miptree %p\n",
__func__, texobj, image->Level,
image->Width, image->Height, image->Depth, intel_texobj->mt);
} else {
intel_image->mt = intel_miptree_create_for_teximage(intel, intel_texobj,
intel_image,
false);
/* Even if the object currently has a mipmap tree associated
* with it, this one is a more likely candidate to represent the
* whole object since our level didn't fit what was there
* before, and any lower levels would fit into our miptree.
*/
intel_miptree_reference(&intel_texobj->mt, intel_image->mt);
DBG("%s: alloc obj %p level %d %dx%dx%d using new miptree %p\n",
__func__, texobj, image->Level,
image->Width, image->Height, image->Depth, intel_image->mt);
}
intel_texobj->needs_validate = true;
return true;
}
static void
intel_free_texture_image_buffer(struct gl_context * ctx,
struct gl_texture_image *texImage)
{
struct intel_texture_image *intelImage = intel_texture_image(texImage);
DBG("%s\n", __func__);
intel_miptree_release(&intelImage->mt);
_swrast_free_texture_image_buffer(ctx, texImage);
}
/**
* Map texture memory/buffer into user space.
* Note: the region of interest parameters are ignored here.
* \param mode bitmask of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT
* \param mapOut returns start of mapping of region of interest
* \param rowStrideOut returns row stride in bytes
*/
static void
intel_map_texture_image(struct gl_context *ctx,
struct gl_texture_image *tex_image,
GLuint slice,
GLuint x, GLuint y, GLuint w, GLuint h,
GLbitfield mode,
GLubyte **map,
GLint *stride)
{
struct intel_context *intel = intel_context(ctx);
struct intel_texture_image *intel_image = intel_texture_image(tex_image);
struct intel_mipmap_tree *mt = intel_image->mt;
/* Our texture data is always stored in a miptree. */
assert(mt);
/* intel_miptree_map operates on a unified "slice" number that references the
* cube face, since it's all just slices to the miptree code.
*/
if (tex_image->TexObject->Target == GL_TEXTURE_CUBE_MAP)
slice = tex_image->Face;
intel_miptree_map(intel, mt, tex_image->Level, slice, x, y, w, h, mode,
(void **)map, stride);
}
static void
intel_unmap_texture_image(struct gl_context *ctx,
struct gl_texture_image *tex_image, GLuint slice)
{
struct intel_context *intel = intel_context(ctx);
struct intel_texture_image *intel_image = intel_texture_image(tex_image);
struct intel_mipmap_tree *mt = intel_image->mt;
if (tex_image->TexObject->Target == GL_TEXTURE_CUBE_MAP)
slice = tex_image->Face;
intel_miptree_unmap(intel, mt, tex_image->Level, slice);
}
void
intelInitTextureFuncs(struct dd_function_table *functions)
{
functions->NewTextureObject = intelNewTextureObject;
functions->NewTextureImage = intelNewTextureImage;
functions->DeleteTextureImage = intelDeleteTextureImage;
functions->DeleteTexture = intelDeleteTextureObject;
functions->AllocTextureImageBuffer = intel_alloc_texture_image_buffer;
functions->FreeTextureImageBuffer = intel_free_texture_image_buffer;
functions->MapTextureImage = intel_map_texture_image;
functions->UnmapTextureImage = intel_unmap_texture_image;
}

View File

@ -1,71 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTELTEX_INC
#define INTELTEX_INC
#include "main/mtypes.h"
#include "main/formats.h"
#include "intel_context.h"
struct intel_renderbuffer;
void intelInitTextureFuncs(struct dd_function_table *functions);
void intelInitTextureImageFuncs(struct dd_function_table *functions);
void intelInitTextureSubImageFuncs(struct dd_function_table *functions);
void intelInitTextureCopyImageFuncs(struct dd_function_table *functions);
void intelSetTexBuffer(__DRIcontext *pDRICtx,
GLint target, __DRIdrawable *pDraw);
void intelSetTexBuffer2(__DRIcontext *pDRICtx,
GLint target, GLint format, __DRIdrawable *pDraw);
struct intel_mipmap_tree *
intel_miptree_create_for_teximage(struct intel_context *intel,
struct intel_texture_object *intelObj,
struct intel_texture_image *intelImage,
bool expect_accelerated_upload);
GLuint intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit);
void intel_tex_map_level_images(struct intel_context *intel,
struct intel_texture_object *intelObj,
int level,
GLbitfield mode);
void intel_tex_unmap_level_images(struct intel_context *intel,
struct intel_texture_object *intelObj,
int level);
bool
intel_tex_image_s8z24_create_renderbuffers(struct intel_context *intel,
struct intel_texture_image *image);
#endif

View File

@ -1,111 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/mtypes.h"
#include "main/enums.h"
#include "main/image.h"
#include "main/teximage.h"
#include "main/texstate.h"
#include "main/fbobject.h"
#include "drivers/common/meta.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_fbo.h"
#include "intel_tex.h"
#include "intel_blit.h"
#define FILE_DEBUG_FLAG DEBUG_TEXTURE
static bool
intel_copy_texsubimage(struct intel_context *intel,
struct intel_texture_image *intelImage,
GLint dstx, GLint dsty, GLint slice,
struct intel_renderbuffer *irb,
GLint x, GLint y, GLsizei width, GLsizei height)
{
const GLenum internalFormat = intelImage->base.Base.InternalFormat;
intel_prepare_render(intel);
if (!intelImage->mt || !irb || !irb->mt) {
if (unlikely(INTEL_DEBUG & DEBUG_PERF))
fprintf(stderr, "%s fail %p %p (0x%08x)\n",
__func__, intelImage->mt, irb, internalFormat);
return false;
}
/* blit from src buffer to texture */
if (!intel_miptree_blit(intel,
irb->mt, irb->mt_level, irb->mt_layer,
x, y, irb->Base.Base.Name == 0,
intelImage->mt, intelImage->base.Base.Level,
intelImage->base.Base.Face + slice,
dstx, dsty, false,
width, height, COLOR_LOGICOP_COPY)) {
return false;
}
return true;
}
static void
intelCopyTexSubImage(struct gl_context *ctx, GLuint dims,
struct gl_texture_image *texImage,
GLint xoffset, GLint yoffset, GLint slice,
struct gl_renderbuffer *rb,
GLint x, GLint y,
GLsizei width, GLsizei height)
{
struct intel_context *intel = intel_context(ctx);
/* Try the BLT engine. */
if (intel_copy_texsubimage(intel,
intel_texture_image(texImage),
xoffset, yoffset, slice,
intel_renderbuffer(rb), x, y, width, height)) {
return;
}
/* Otherwise, fall back to meta. This will likely be slow. */
perf_debug("%s - fallback to swrast\n", __func__);
_mesa_meta_CopyTexSubImage(ctx, dims, texImage,
xoffset, yoffset, slice,
rb, x, y, width, height);
}
void
intelInitTextureCopyImageFuncs(struct dd_function_table *functions)
{
functions->CopyTexSubImage = intelCopyTexSubImage;
}

View File

@ -1,363 +0,0 @@
#include "main/glheader.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/enums.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/formats.h"
#include "main/image.h"
#include "main/pbo.h"
#include "main/renderbuffer.h"
#include "main/texcompress.h"
#include "main/texgetimage.h"
#include "main/texobj.h"
#include "main/teximage.h"
#include "main/texstore.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#define FILE_DEBUG_FLAG DEBUG_TEXTURE
/* Work back from the specified level of the image to the baselevel and create a
* miptree of that size.
*/
struct intel_mipmap_tree *
intel_miptree_create_for_teximage(struct intel_context *intel,
struct intel_texture_object *intelObj,
struct intel_texture_image *intelImage,
bool expect_accelerated_upload)
{
GLuint firstLevel;
GLuint lastLevel;
int width, height, depth;
GLuint i;
intel_miptree_get_dimensions_for_image(&intelImage->base.Base,
&width, &height, &depth);
DBG("%s\n", __func__);
if (intelImage->base.Base.Level > intelObj->base.Attrib.BaseLevel &&
(width == 1 ||
(intelObj->base.Target != GL_TEXTURE_1D && height == 1) ||
(intelObj->base.Target == GL_TEXTURE_3D && depth == 1))) {
/* For this combination, we're at some lower mipmap level and
* some important dimension is 1. We can't extrapolate up to a
* likely base level width/height/depth for a full mipmap stack
* from this info, so just allocate this one level.
*/
firstLevel = intelImage->base.Base.Level;
lastLevel = intelImage->base.Base.Level;
} else {
/* If this image disrespects BaseLevel, allocate from level zero.
* Usually BaseLevel == 0, so it's unlikely to happen.
*/
if (intelImage->base.Base.Level < intelObj->base.Attrib.BaseLevel)
firstLevel = 0;
else
firstLevel = intelObj->base.Attrib.BaseLevel;
/* Figure out image dimensions at start level. */
for (i = intelImage->base.Base.Level; i > firstLevel; i--) {
width <<= 1;
if (height != 1)
height <<= 1;
if (depth != 1)
depth <<= 1;
}
/* Guess a reasonable value for lastLevel. This is probably going
* to be wrong fairly often and might mean that we have to look at
* resizable buffers, or require that buffers implement lazy
* pagetable arrangements.
*/
if ((intelObj->base.Sampler.Attrib.MinFilter == GL_NEAREST ||
intelObj->base.Sampler.Attrib.MinFilter == GL_LINEAR) &&
intelImage->base.Base.Level == firstLevel) {
lastLevel = firstLevel;
} else {
lastLevel = (firstLevel +
_mesa_get_tex_max_num_levels(intelObj->base.Target,
width, height, depth) - 1);
}
}
return intel_miptree_create(intel,
intelObj->base.Target,
intelImage->base.Base.TexFormat,
firstLevel,
lastLevel,
width,
height,
depth,
expect_accelerated_upload,
INTEL_MIPTREE_TILING_ANY);
}
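/* Worked example (illustrative): a 64x64 image specified at Level 2 with
* BaseLevel 0 and a mipmapping MinFilter. The doubling loop above recovers a
* 256x256 level-0 size, and _mesa_get_tex_max_num_levels(GL_TEXTURE_2D, 256,
* 256, 1) yields 9, so the miptree is allocated for levels 0..8.
*/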
/* XXX: Do this for TexSubImage also:
*/
static bool
try_pbo_upload(struct gl_context *ctx,
struct gl_texture_image *image,
const struct gl_pixelstore_attrib *unpack,
GLenum format, GLenum type, const void *pixels)
{
struct intel_texture_image *intelImage = intel_texture_image(image);
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
GLuint src_offset;
drm_intel_bo *src_buffer;
if (!unpack->BufferObj)
return false;
DBG("trying pbo upload\n");
if (intel->ctx._ImageTransferState ||
unpack->SkipPixels || unpack->SkipRows) {
DBG("%s: image transfer\n", __func__);
return false;
}
ctx->Driver.AllocTextureImageBuffer(ctx, image);
if (!intelImage->mt) {
DBG("%s: no miptree\n", __func__);
return false;
}
if (!_mesa_format_matches_format_and_type(intelImage->mt->format,
format, type, false, NULL)) {
DBG("%s: format mismatch (upload to %s with format 0x%x, type 0x%x)\n",
__func__, _mesa_get_format_name(intelImage->mt->format),
format, type);
return false;
}
src_buffer = intel_bufferobj_source(intel, pbo, 64, &src_offset);
/* note: potential 64-bit ptr to 32-bit int cast */
src_offset += (GLuint) (unsigned long) pixels;
int src_stride =
_mesa_image_row_stride(unpack, image->Width, format, type);
struct intel_mipmap_tree *pbo_mt =
intel_miptree_create_for_bo(intel,
src_buffer,
intelImage->mt->format,
src_offset,
image->Width, image->Height,
src_stride, I915_TILING_NONE);
if (!pbo_mt)
return false;
if (!intel_miptree_blit(intel,
pbo_mt, 0, 0,
0, 0, false,
intelImage->mt, image->Level, image->Face,
0, 0, false,
image->Width, image->Height, COLOR_LOGICOP_COPY)) {
DBG("%s: blit failed\n", __func__);
intel_miptree_release(&pbo_mt);
return false;
}
intel_miptree_release(&pbo_mt);
DBG("%s: success\n", __func__);
return true;
}
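/* Illustrative sketch (not driver code): with a pixel-unpack buffer
 * bound, the 'pixels' argument to glTexImage2D is a byte offset into
 * that buffer rather than a CPU pointer, which is why the code above
 * folds it into src_offset.  This assumes a tightly packed image with
 * no SKIP_PIXELS/ROW_LENGTH unpack state.
 */
#include <stdint.h>

static uint32_t
pbo_source_offset(const void *pixels_as_offset, uint32_t base_offset)
{
   /* Same potential 64-bit-pointer-to-32-bit truncation noted above. */
   return base_offset + (uint32_t) (uintptr_t) pixels_as_offset;
}

static int
tightly_packed_row_stride(int width, int bytes_per_pixel)
{
   return width * bytes_per_pixel;   /* no alignment or row padding */
}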
static void
intelTexImage(struct gl_context * ctx,
GLuint dims,
struct gl_texture_image *texImage,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *unpack)
{
DBG("%s target %s level %d %dx%dx%d\n", __func__,
_mesa_enum_to_string(texImage->TexObject->Target),
texImage->Level, texImage->Width, texImage->Height, texImage->Depth);
/* Attempt to use the blitter for PBO image uploads.
*/
if (dims <= 2 &&
try_pbo_upload(ctx, texImage, unpack, format, type, pixels)) {
return;
}
DBG("%s: upload image %dx%dx%d pixels %p\n",
__func__, texImage->Width, texImage->Height, texImage->Depth,
pixels);
_mesa_store_teximage(ctx, dims, texImage,
format, type, pixels, unpack);
}
/**
* Binds a region to a texture image, as if it had been uploaded by
* glTexImage2D().
*
* Used for GLX_EXT_texture_from_pixmap and the EGL image extensions.
*/
static void
intel_set_texture_image_region(struct gl_context *ctx,
struct gl_texture_image *image,
struct intel_region *region,
GLenum target,
GLenum internalFormat,
mesa_format format,
uint32_t offset,
GLuint width,
GLuint height,
GLuint tile_x,
GLuint tile_y)
{
struct intel_context *intel = intel_context(ctx);
struct intel_texture_image *intel_image = intel_texture_image(image);
struct gl_texture_object *texobj = image->TexObject;
struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
bool has_surface_tile_offset = false;
uint32_t draw_x, draw_y;
_mesa_init_teximage_fields(&intel->ctx, image,
width, height, 1,
0, internalFormat, format);
ctx->Driver.FreeTextureImageBuffer(ctx, image);
intel_image->mt = intel_miptree_create_layout(intel, target, image->TexFormat,
0, 0,
width, height, 1);
if (intel_image->mt == NULL)
return;
intel_region_reference(&intel_image->mt->region, region);
intel_image->mt->total_width = width;
intel_image->mt->total_height = height;
intel_image->mt->level[0].slice[0].x_offset = tile_x;
intel_image->mt->level[0].slice[0].y_offset = tile_y;
intel_miptree_get_tile_offsets(intel_image->mt, 0, 0, &draw_x, &draw_y);
/* From "OES_EGL_image" error reporting. We report GL_INVALID_OPERATION
* for EGL images from non-tile aligned sufaces in gen4 hw and earlier which has
* trouble resolving back to destination image due to alignment issues.
*/
if (!has_surface_tile_offset &&
(draw_x != 0 || draw_y != 0)) {
_mesa_error(ctx, GL_INVALID_OPERATION, __func__);
intel_miptree_release(&intel_image->mt);
return;
}
intel_texobj->needs_validate = true;
intel_image->mt->offset = offset;
assert(region->pitch % region->cpp == 0);
intel_image->base.RowStride = region->pitch / region->cpp;
/* Immediately validate the image to the object. */
intel_miptree_reference(&intel_texobj->mt, intel_image->mt);
}
void
intelSetTexBuffer2(__DRIcontext *pDRICtx, GLint target,
GLint texture_format,
__DRIdrawable *dPriv)
{
struct gl_framebuffer *fb = dPriv->driverPrivate;
struct intel_context *intel = pDRICtx->driverPrivate;
struct gl_context *ctx = &intel->ctx;
struct intel_texture_object *intelObj;
struct intel_renderbuffer *rb;
struct gl_texture_object *texObj;
struct gl_texture_image *texImage;
int level = 0, internalFormat = 0;
mesa_format texFormat = MESA_FORMAT_NONE;
texObj = _mesa_get_current_tex_object(ctx, target);
intelObj = intel_texture_object(texObj);
if (!intelObj)
return;
if (dPriv->lastStamp != dPriv->dri2.stamp ||
!pDRICtx->driScreenPriv->dri2.useInvalidate)
intel_update_renderbuffers(pDRICtx, dPriv);
rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
/* If the region isn't set, then intel_update_renderbuffers was unable
* to get the buffers for the drawable.
*/
if (!rb || !rb->mt)
return;
if (rb->mt->cpp == 4) {
if (texture_format == __DRI_TEXTURE_FORMAT_RGB) {
internalFormat = GL_RGB;
texFormat = MESA_FORMAT_B8G8R8X8_UNORM;
}
else {
internalFormat = GL_RGBA;
texFormat = MESA_FORMAT_B8G8R8A8_UNORM;
}
} else if (rb->mt->cpp == 2) {
internalFormat = GL_RGB;
texFormat = MESA_FORMAT_B5G6R5_UNORM;
}
_mesa_lock_texture(&intel->ctx, texObj);
texImage = _mesa_get_tex_image(ctx, texObj, target, level);
intel_set_texture_image_region(ctx, texImage, rb->mt->region, target,
internalFormat, texFormat, 0,
rb->mt->region->width,
rb->mt->region->height,
0, 0);
_mesa_unlock_texture(&intel->ctx, texObj);
}
void
intelSetTexBuffer(__DRIcontext *pDRICtx, GLint target, __DRIdrawable *dPriv)
{
/* The old interface didn't have the format argument, so copy our
* implementation's behavior at the time.
*/
intelSetTexBuffer2(pDRICtx, target, __DRI_TEXTURE_FORMAT_RGBA, dPriv);
}
static void
intel_image_target_texture_2d(struct gl_context *ctx, GLenum target,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage,
GLeglImageOES image_handle)
{
struct intel_context *intel = intel_context(ctx);
__DRIscreen *screen;
__DRIimage *image;
screen = intel->intelScreen->driScrnPriv;
image = screen->dri2.image->lookupEGLImage(screen, image_handle,
screen->loaderPrivate);
if (image == NULL)
return;
intel_set_texture_image_region(ctx, texImage, image->region,
target, image->internal_format,
image->format, image->offset,
image->width, image->height,
image->tile_x, image->tile_y);
}
void
intelInitTextureImageFuncs(struct dd_function_table *functions)
{
functions->TexImage = intelTexImage;
functions->EGLImageTargetTexture2D = intel_image_target_texture_2d;
}


@ -1,186 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keithw@vmware.com>
* Michel Dänzer <daenzer@vmware.com>
*/
#include "intel_mipmap_tree.h"
#include "intel_tex_layout.h"
#include "intel_context.h"
#include "main/image.h"
#include "main/macros.h"
static unsigned int
intel_horizontal_texture_alignment_unit(struct intel_context *intel,
mesa_format format)
{
/**
* From the "Alignment Unit Size" section of various specs, namely:
* - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
* - i965 and G45 PRMs: Volume 1, Section 6.17.3.4.
* - Ironlake and Sandybridge PRMs: Volume 1, Part 1, Section 7.18.3.4
* - BSpec (for Ivybridge and slight variations in separate stencil)
*
* +----------------------------------------------------------------------+
* | | alignment unit width ("i") |
* | Surface Property |-----------------------------|
* | | 915 | 965 | ILK | SNB | IVB |
* +----------------------------------------------------------------------+
* | YUV 4:2:2 format | 8 | 4 | 4 | 4 | 4 |
* | BC1-5 compressed format (DXTn/S3TC) | 4 | 4 | 4 | 4 | 4 |
* | FXT1 compressed format | 8 | 8 | 8 | 8 | 8 |
* | Depth Buffer (16-bit) | 4 | 4 | 4 | 4 | 8 |
* | Depth Buffer (other) | 4 | 4 | 4 | 4 | 4 |
* | Separate Stencil Buffer | N/A | N/A | 8 | 8 | 8 |
* | All Others | 4 | 4 | 4 | 4 | 4 |
* +----------------------------------------------------------------------+
*
* On IVB+, non-special cases can be overridden by setting the SURFACE_STATE
* "Surface Horizontal Alignment" field to HALIGN_4 or HALIGN_8.
*/
if (_mesa_is_format_compressed(format)) {
/* The hardware alignment requirements for compressed textures
* happen to match the block boundaries.
*/
unsigned int i, j;
_mesa_get_format_block_size(format, &i, &j);
return i;
}
return 4;
}
static unsigned int
intel_vertical_texture_alignment_unit(struct intel_context *intel,
mesa_format format)
{
/**
* From the "Alignment Unit Size" section of various specs, namely:
* - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
* - i965 and G45 PRMs: Volume 1, Section 6.17.3.4.
* - Ironlake and Sandybridge PRMs: Volume 1, Part 1, Section 7.18.3.4
* - BSpec (for Ivybridge and slight variations in separate stencil)
*
* +----------------------------------------------------------------------+
* | | alignment unit height ("j") |
* | Surface Property |-----------------------------|
* | | 915 | 965 | ILK | SNB | IVB |
* +----------------------------------------------------------------------+
* | BC1-5 compressed format (DXTn/S3TC) | 4 | 4 | 4 | 4 | 4 |
* | FXT1 compressed format | 4 | 4 | 4 | 4 | 4 |
* | Depth Buffer | 2 | 2 | 2 | 4 | 4 |
* | Separate Stencil Buffer | N/A | N/A | N/A | 4 | 8 |
* | All Others | 2 | 2 | 2 | 2 | 2 |
* +----------------------------------------------------------------------+
*
* On SNB+, non-special cases can be overridden by setting the SURFACE_STATE
* "Surface Vertical Alignment" field to VALIGN_2 or VALIGN_4.
*/
if (_mesa_is_format_compressed(format))
return 4;
return 2;
}
void
intel_get_texture_alignment_unit(struct intel_context *intel,
mesa_format format,
unsigned int *w, unsigned int *h)
{
*w = intel_horizontal_texture_alignment_unit(intel, format);
*h = intel_vertical_texture_alignment_unit(intel, format);
}
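/* Standalone sketch of how the (i, j) units above are applied when
 * padding a level's footprint, assuming the usual power-of-two ALIGN()
 * idiom; e.g. a 13x7 level with i = 4, j = 2 pads to 16x8.
 */
static unsigned
align_pot(unsigned value, unsigned alignment)   /* alignment must be 2^n */
{
   return (value + alignment - 1) & ~(alignment - 1);
}

static void
padded_level_size(unsigned width, unsigned height,
                  unsigned align_w, unsigned align_h,
                  unsigned *padded_w, unsigned *padded_h)
{
   *padded_w = align_pot(width, align_w);
   *padded_h = align_pot(height, align_h);
}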
void i945_miptree_layout_2d(struct intel_mipmap_tree *mt)
{
GLuint level;
GLuint x = 0;
GLuint y = 0;
GLuint width = mt->physical_width0;
GLuint height = mt->physical_height0;
GLuint depth = mt->physical_depth0; /* number of array layers. */
mt->total_width = mt->physical_width0;
if (mt->compressed) {
mt->total_width = ALIGN(mt->physical_width0, mt->align_w);
}
/* May need to adjust width to accommodate the placement of
* the 2nd mipmap. This occurs when the alignment
* constraints of mipmap placement push the right edge of the
* 2nd mipmap out past the width of its parent.
*/
if (mt->first_level != mt->last_level) {
GLuint mip1_width;
if (mt->compressed) {
mip1_width = ALIGN(minify(mt->physical_width0, 1), mt->align_w) +
ALIGN(minify(mt->physical_width0, 2), mt->align_w);
} else {
mip1_width = ALIGN(minify(mt->physical_width0, 1), mt->align_w) +
minify(mt->physical_width0, 2);
}
if (mip1_width > mt->total_width) {
mt->total_width = mip1_width;
}
}
mt->total_height = 0;
for ( level = mt->first_level ; level <= mt->last_level ; level++ ) {
GLuint img_height;
intel_miptree_set_level_info(mt, level, x, y, width,
height, depth);
img_height = ALIGN(height, mt->align_h);
if (mt->compressed)
img_height /= mt->align_h;
/* Because the images are packed tightly, the final level's offset is
* not necessarily the maximal one, so track the running maximum:
*/
mt->total_height = MAX2(mt->total_height, y + img_height);
/* Layout_below: step right after second mipmap.
*/
if (level == mt->first_level + 1) {
x += ALIGN(width, mt->align_w);
}
else {
y += img_height;
}
width = minify(width, 1);
height = minify(height, 1);
}
}
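/* Worked example (illustrative): an uncompressed 16x16 texture with
 * align_w = 4, align_h = 2 and levels 0..4 lays out as
 *
 *   level 0: 16x16 at (0, 0)    y steps by ALIGN(16, 2) = 16
 *   level 1:  8x8  at (0, 16)   first_level + 1: x steps by ALIGN(8, 4) = 8
 *   level 2:  4x4  at (8, 16)   y steps by 4
 *   level 3:  2x2  at (8, 20)   y steps by 2
 *   level 4:  1x1  at (8, 22)   y steps by ALIGN(1, 2) = 2
 *
 * mip1_width = ALIGN(8, 4) + 4 = 12 doesn't exceed 16, so the result is
 * a 16x24 miptree: mip 0 on top, the rest packed below it, stepping
 * right after the second level.
 */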


@ -1,40 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keithw@vmware.com>
* Michel Dänzer <daenzer@vmware.com>
*/
#include "main/macros.h"
extern void i945_miptree_layout_2d(struct intel_mipmap_tree *mt);
void
intel_get_texture_alignment_unit(struct intel_context *intel,
mesa_format format,
unsigned int *w, unsigned int *h);

View File

@ -1,84 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _INTEL_TEX_OBJ_H
#define _INTEL_TEX_OBJ_H
#include "swrast/s_context.h"
struct intel_texture_object
{
struct gl_texture_object base;
/* This is a mirror of base._MaxLevel, updated at validate time,
* except that we don't bother with the non-base levels for
* non-mipmapped textures.
*/
unsigned int _MaxLevel;
/* On validation any active images held in main memory or in other
* regions will be copied to this region and the old storage freed.
*/
struct intel_mipmap_tree *mt;
/**
* Set when the miptrees in this texture object's images might not all
* match the miptree (mt) above.
*/
bool needs_validate;
};
/**
* intel_texture_image is a subclass of swrast_texture_image because we
* sometimes fall back to using the swrast module for software rendering.
*/
struct intel_texture_image
{
struct swrast_texture_image base;
/* If intelImage->mt != NULL, image data is stored here.
* Else if intelImage->base.Buffer != NULL, image is stored there.
* Else there is no image data.
*/
struct intel_mipmap_tree *mt;
};
static inline struct intel_texture_object *
intel_texture_object(struct gl_texture_object *obj)
{
return (struct intel_texture_object *) obj;
}
static inline struct intel_texture_image *
intel_texture_image(struct gl_texture_image *img)
{
return (struct intel_texture_image *) img;
}
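/* Illustrative sketch of the subclassing convention behind these casts:
 * because the base struct is the first member, a pointer to the base can
 * be downcast with a plain cast when it is known to wrap the derived
 * type.  The names here are generic, not part of the driver.
 */
struct example_base { int refcount; };

struct example_derived {
   struct example_base base;   /* must be the first member */
   int driver_private;
};

static inline struct example_derived *
example_derived(struct example_base *obj)
{
   return (struct example_derived *) obj;
}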
#endif /* _INTEL_TEX_OBJ_H */


@ -1,155 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/bufferobj.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/pbo.h"
#include "main/texobj.h"
#include "main/texstore.h"
#include "main/texcompress.h"
#include "main/enums.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_tex.h"
#include "intel_mipmap_tree.h"
#include "intel_blit.h"
#define FILE_DEBUG_FLAG DEBUG_TEXTURE
static bool
intel_blit_texsubimage(struct gl_context * ctx,
struct gl_texture_image *texImage,
GLint xoffset, GLint yoffset,
GLint width, GLint height,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *packing)
{
struct intel_context *intel = intel_context(ctx);
struct intel_texture_image *intelImage = intel_texture_image(texImage);
/* Try to do a blit upload of the subimage if the texture is
* currently busy.
*/
if (!intelImage->mt)
return false;
/* The blitter can't handle Y tiling */
if (intelImage->mt->region->tiling == I915_TILING_Y)
return false;
if (texImage->TexObject->Target != GL_TEXTURE_2D)
return false;
if (!drm_intel_bo_busy(intelImage->mt->region->bo))
return false;
DBG("BLT subimage %s target %s level %d offset %d,%d %dx%d\n",
__func__,
_mesa_enum_to_string(texImage->TexObject->Target),
texImage->Level, xoffset, yoffset, width, height);
pixels = _mesa_validate_pbo_teximage(ctx, 2, width, height, 1,
format, type, pixels, packing,
"glTexSubImage");
if (!pixels)
return false;
struct intel_mipmap_tree *temp_mt =
intel_miptree_create(intel, GL_TEXTURE_2D, texImage->TexFormat,
0, 0,
width, height, 1,
false, INTEL_MIPTREE_TILING_NONE);
if (!temp_mt)
goto err;
GLubyte *dst = intel_miptree_map_raw(intel, temp_mt);
if (!dst)
goto err;
if (!_mesa_texstore(ctx, 2, texImage->_BaseFormat,
texImage->TexFormat,
temp_mt->region->pitch,
&dst,
width, height, 1,
format, type, pixels, packing)) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
}
intel_miptree_unmap_raw(temp_mt);
bool ret;
ret = intel_miptree_blit(intel,
temp_mt, 0, 0,
0, 0, false,
intelImage->mt, texImage->Level, texImage->Face,
xoffset, yoffset, false,
width, height, COLOR_LOGICOP_COPY);
assert(ret);
intel_miptree_release(&temp_mt);
_mesa_unmap_teximage_pbo(ctx, packing);
return ret;
err:
_mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
intel_miptree_release(&temp_mt);
_mesa_unmap_teximage_pbo(ctx, packing);
return false;
}
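/* Sketch of the policy above (names illustrative, not driver code): a
 * direct CPU map is cheapest when the destination buffer is idle, so the
 * blit staging path is only worth taking when the GPU is still using the
 * buffer, and only for cases the blitter can actually handle.
 */
#include <stdbool.h>

enum upload_path { UPLOAD_CPU_MAP, UPLOAD_BLIT_STAGING };

static enum upload_path
choose_subimage_path(bool bo_busy, bool is_2d, bool y_tiled)
{
   if (!is_2d || y_tiled)   /* blitter can't handle these cases */
      return UPLOAD_CPU_MAP;
   return bo_busy ? UPLOAD_BLIT_STAGING : UPLOAD_CPU_MAP;
}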
static void
intelTexSubImage(struct gl_context * ctx,
GLuint dims,
struct gl_texture_image *texImage,
GLint xoffset, GLint yoffset, GLint zoffset,
GLsizei width, GLsizei height, GLsizei depth,
GLenum format, GLenum type,
const GLvoid * pixels,
const struct gl_pixelstore_attrib *packing)
{
/* The intel_blit_texsubimage() function only handles 2D images */
if (dims != 2 || !intel_blit_texsubimage(ctx, texImage,
xoffset, yoffset,
width, height,
format, type, pixels, packing)) {
_mesa_store_texsubimage(ctx, dims, texImage,
xoffset, yoffset, zoffset,
width, height, depth,
format, type, pixels, packing);
}
}
void
intelInitTextureSubImageFuncs(struct dd_function_table *functions)
{
functions->TexSubImage = intelTexSubImage;
}


@ -1,141 +0,0 @@
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/samplerobj.h"
#include "main/teximage.h"
#include "main/texobj.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_blit.h"
#include "intel_tex.h"
#include "intel_tex_layout.h"
#define FILE_DEBUG_FLAG DEBUG_TEXTURE
/**
* When validating, we only care about the texture images that could
* be seen, so for non-mipmapped modes we want to ignore everything
* but BaseLevel.
*/
static void
intel_update_max_level(struct intel_texture_object *intelObj,
struct gl_sampler_object *sampler)
{
struct gl_texture_object *tObj = &intelObj->base;
int maxlevel;
if (sampler->Attrib.MinFilter == GL_NEAREST ||
sampler->Attrib.MinFilter == GL_LINEAR) {
maxlevel = tObj->Attrib.BaseLevel;
} else {
maxlevel = tObj->_MaxLevel;
}
if (intelObj->_MaxLevel != maxlevel) {
intelObj->_MaxLevel = maxlevel;
intelObj->needs_validate = true;
}
}
/* Ensure the texture object's miptree can hold every level that may be
* sampled, (re)allocating it and pulling in stray images if necessary.
*/
GLuint
intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
{
struct gl_context *ctx = &intel->ctx;
struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
struct intel_texture_object *intelObj = intel_texture_object(tObj);
struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
GLuint face, i;
GLuint nr_faces = 0;
struct intel_texture_image *firstImage;
int width, height, depth;
/* TBOs require no validation -- they always just point to their BO. */
if (tObj->Target == GL_TEXTURE_BUFFER)
return true;
/* We know/require this is true by now:
*/
assert(intelObj->base._BaseComplete);
/* What levels must the tree include at a minimum?
*/
intel_update_max_level(intelObj, sampler);
if (intelObj->mt && intelObj->mt->first_level != tObj->Attrib.BaseLevel)
intelObj->needs_validate = true;
if (!intelObj->needs_validate)
return true;
firstImage = intel_texture_image(tObj->Image[0][tObj->Attrib.BaseLevel]);
/* Check tree can hold all active levels. Check tree matches
* target, imageFormat, etc.
*
* For pre-gen4, we have to match first_level == tObj->BaseLevel,
* because we don't have the control that gen4 does to make min/mag
* determination happen at a nonzero (hardware) baselevel. Because
* of that, we just always relayout on baselevel change.
*/
if (intelObj->mt &&
(!intel_miptree_match_image(intelObj->mt, &firstImage->base.Base) ||
intelObj->mt->first_level != tObj->Attrib.BaseLevel ||
intelObj->mt->last_level < intelObj->_MaxLevel)) {
intel_miptree_release(&intelObj->mt);
}
/* May need to create a new tree:
*/
if (!intelObj->mt) {
intel_miptree_get_dimensions_for_image(&firstImage->base.Base,
&width, &height, &depth);
perf_debug("Creating new %s %dx%dx%d %d..%d miptree to handle finalized "
"texture miptree.\n",
_mesa_get_format_name(firstImage->base.Base.TexFormat),
width, height, depth, tObj->Attrib.BaseLevel, intelObj->_MaxLevel);
intelObj->mt = intel_miptree_create(intel,
intelObj->base.Target,
firstImage->base.Base.TexFormat,
tObj->Attrib.BaseLevel,
intelObj->_MaxLevel,
width,
height,
depth,
true,
INTEL_MIPTREE_TILING_ANY);
if (!intelObj->mt)
return false;
}
/* Pull in any images not in the object's tree:
*/
nr_faces = _mesa_num_tex_faces(intelObj->base.Target);
for (face = 0; face < nr_faces; face++) {
for (i = tObj->Attrib.BaseLevel; i <= intelObj->_MaxLevel; i++) {
struct intel_texture_image *intelImage =
intel_texture_image(intelObj->base.Image[face][i]);
/* Stop at the first missing level; smaller mipmap levels don't exist. */
if (intelImage == NULL)
break;
if (intelObj->mt != intelImage->mt) {
intel_miptree_copy_teximage(intel, intelImage, intelObj->mt,
false /* invalidate */);
}
/* After we're done, we'd better agree that our layout is
* appropriate, or we'll end up hitting this function again on the
* next draw.
*/
assert(intel_miptree_match_image(intelObj->mt, &intelImage->base.Base));
}
}
intelObj->needs_validate = false;
return true;
}

File diff suppressed because it is too large


@ -1,52 +0,0 @@
/**************************************************************************
*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTELTRIS_INC
#define INTELTRIS_INC
#include "main/mtypes.h"
#define INTEL_VB_SIZE (32 * 1024)
/** 3 dwords of state_immediate and 2 of 3dprim, in intel_flush_prim */
#define INTEL_PRIM_EMIT_SIZE (5 * 4)
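/* Illustrative check of the arithmetic in the comment above: 3 dwords of
 * state_immediate plus 2 dwords of 3DPRIM, at 4 bytes per dword.
 */
_Static_assert(INTEL_PRIM_EMIT_SIZE == (3 + 2) * 4,
               "primitive emit is 5 dwords");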
#define _INTEL_NEW_RENDERSTATE (_NEW_LINE | \
_NEW_POLYGON | \
_NEW_LIGHT | \
_NEW_PROGRAM | \
_NEW_POLYGONSTIPPLE)
extern void intelInitTriFuncs(struct gl_context * ctx);
extern void intelChooseRenderState(struct gl_context * ctx);
void intel_set_prim(struct intel_context *intel, uint32_t prim);
GLuint *intel_get_prim_space(struct intel_context *intel, unsigned int count);
void intel_flush_prim(struct intel_context *intel);
void intel_finish_vb(struct intel_context *intel);
#endif


@ -1,94 +0,0 @@
# Copyright © 2017 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
files_i915 = files(
'i830_context.c',
'i830_context.h',
'i830_reg.h',
'i830_state.c',
'i830_texblend.c',
'i830_texstate.c',
'i830_vtbl.c',
'i915_context.c',
'i915_context.h',
'i915_debug_fp.c',
'i915_debug.h',
'i915_fragprog.c',
'i915_program.c',
'i915_program.h',
'i915_reg.h',
'i915_state.c',
'i915_tex_layout.c',
'i915_texstate.c',
'i915_vtbl.c',
'intel_batchbuffer.c',
'intel_batchbuffer.h',
'intel_blit.c',
'intel_blit.h',
'intel_buffer_objects.c',
'intel_buffer_objects.h',
'intel_buffers.c',
'intel_buffers.h',
'intel_chipset.h',
'intel_clear.c',
'intel_clear.h',
'intel_context.c',
'intel_context.h',
'intel_extensions.c',
'intel_extensions.h',
'intel_fbo.c',
'intel_fbo.h',
'intel_mipmap_tree.c',
'intel_mipmap_tree.h',
'intel_pixel_bitmap.c',
'intel_pixel.c',
'intel_pixel_copy.c',
'intel_pixel_draw.c',
'intel_pixel.h',
'intel_pixel_read.c',
'intel_reg.h',
'intel_regions.c',
'intel_regions.h',
'intel_render.c',
'intel_screen.c',
'intel_screen.h',
'intel_state.c',
'intel_syncobj.c',
'intel_tex.c',
'intel_tex_copy.c',
'intel_tex.h',
'intel_tex_image.c',
'intel_tex_layout.c',
'intel_tex_layout.h',
'intel_tex_obj.h',
'intel_tex_subimage.c',
'intel_tex_validate.c',
'intel_tris.c',
'intel_tris.h',
)
libi915 = static_library(
'i915',
files_i915,
include_directories : [inc_include, inc_src, inc_mapi, inc_mesa, inc_gallium, inc_gallium_aux, inc_dri_common, inc_util],
c_args : [no_override_init_args],
gnu_symbol_visibility : 'hidden',
dependencies : [dep_libdrm, dep_libdrm_intel, idep_mesautil],
)


@ -22,12 +22,6 @@ subdir('common')
_dri_drivers = []
_dri_link = []
if with_dri_i915
subdir('i915')
_dri_drivers += libi915
_dri_link += 'i830_dri.so'
_dri_link += 'i915_dri.so'
endif
if with_dri_i965
subdir('i965')
_dri_drivers += libi965