st/xa: Initial import of the xa state-tracker and the xa-vmwgfx target.

See the file src/gallium/state_trackers/xa/README for more info.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
This commit is contained in:
Thomas Hellstrom 2011-06-15 10:46:24 +02:00
parent 40aec11b75
commit 9f2f5b3d7f
16 changed files with 2709 additions and 3 deletions

View File

@ -1559,6 +1559,9 @@ yes)
fi
have_st_vega="yes"
;;
xa)
HAVE_ST_XA="yes"
;;
esac
if test -n "$tracker"; then
@ -1730,7 +1733,8 @@ dnl
dnl Gallium helper functions
dnl
gallium_check_st() {
if test "x$HAVE_ST_DRI" = xyes || test "x$HAVE_ST_XORG" = xyes; then
if test "x$HAVE_ST_DRI" = xyes || test "x$HAVE_ST_XORG" = xyes ||
test "x$HAVE_ST_XA" = xyes; then
GALLIUM_WINSYS_DIRS="$GALLIUM_WINSYS_DIRS $1"
fi
if test "x$HAVE_ST_DRI" = xyes && test "x$2" != x; then
@ -1739,6 +1743,9 @@ gallium_check_st() {
if test "x$HAVE_ST_XORG" = xyes && test "x$3" != x; then
GALLIUM_TARGET_DIRS="$GALLIUM_TARGET_DIRS $3"
fi
if test "x$HAVE_ST_XA" = xyes && test "x$4" != x; then
GALLIUM_TARGET_DIRS="$GALLIUM_TARGET_DIRS $4"
fi
}
gallium_require_llvm() {
@ -1749,7 +1756,6 @@ gallium_require_llvm() {
fi
}
dnl
dnl Gallium SVGA configuration
dnl
@ -1760,7 +1766,7 @@ AC_ARG_ENABLE([gallium-svga],
[enable_gallium_svga=auto])
if test "x$enable_gallium_svga" = xyes; then
GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS svga"
gallium_check_st "svga/drm" "dri-vmwgfx" "xorg-vmwgfx"
gallium_check_st "svga/drm" "dri-vmwgfx" "xorg-vmwgfx" "xa-vmwgfx"
elif test "x$enable_gallium_svga" = xauto; then
GALLIUM_DRIVERS_DIRS="$GALLIUM_DRIVERS_DIRS svga"
fi

View File

@ -0,0 +1,66 @@
# Builds libxatracker.o: a single relocatable object ("ld -r") holding the
# whole XA state tracker, consumed by the xa-vmwgfx target.
TOP = ../../../..
include $(TOP)/configs/current

##### MACROS #####

# XA interface version advertised to driver writers.
XA_MAJOR = 0
XA_MINOR = 1
XA_TINY = 0

XA_CFLAGS = -g -fPIC -Wall

XA_INCLUDES= -I$(TOP)/src/gallium/ \
	-I$(TOP)/src/gallium/auxiliary \
	-I$(TOP)/src/gallium/include \
	-I$(TOP)/src/gallium/winsys \
	-I$(TOP)/src/gallium/drivers

XA_LIB = xatracker
# Note: deliberately a .o, not a .a — see the partial-link rule below.
XA_LIB_NAME = lib$(XA_LIB).o
XA_LIB_DEPS =

# Currently empty; kept as a hook for shared gallium sources.
COMMON_GALLIUM_SOURCES=

SOURCES = \
	xa_tracker.c \
	xa_context.c \
	xa_renderer.c \
	xa_tgsi.c \
	xa_yuv.c

OBJECTS = $(SOURCES:.c=.o)

##### RULES #####

# Legacy suffix rule: compile each .c into a .o in the current directory.
.c.o:
	$(CC) -c $(XA_CFLAGS) $(XA_INCLUDES) $<

##### TARGETS #####

default: $(XA_LIB_NAME)

# Make the library as one relocatable object via a partial link.
$(XA_LIB_NAME): depend $(OBJECTS)
	$(LD) -r -o $(XA_LIB_NAME) $(OBJECTS)

# Nothing to install; the object is linked into the xa-vmwgfx target in-tree.
install: FORCE

clean:
	-rm -f *.o *~
	-rm -f *.lo
	-rm -f *.la
	-rm -f *.pc
	-rm -rf .libs
	-rm -f depend depend.bak

# Regenerate header dependencies with the configured $(MKDEP) tool.
depend: $(SOURCES)
	@ echo "running $(MKDEP)"
	@ rm -f depend
	@ touch depend
	@ $(MKDEP) $(MKDEP_OPTIONS) -I$(TOP)/include $(XA_INCLUDES) $(SOURCES) \
		> /dev/null

# Leading '-': the file does not exist on a fresh checkout.
-include depend

FORCE:

View File

@ -0,0 +1,64 @@
/**********************************************************
* Copyright 2009-2011 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*********************************************************
* Authors:
* Zack Rusin <zackr-at-vmware-dot-com>
* Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
The XA state tracker is intended as a versioned interface to gallium for
xorg driver writers. Initially it's mostly based on Zack Rusin's
composite / video work for the Xorg state tracker.
The motivation behind this state tracker is that the Xorg state tracker has
a number of interfaces to work with:
1) The Xorg sdk (versioned)
2) Gallium3D (not versioned)
3) KMS modesetting (versioned)
4) Driver-private (hopefully versioned)
Since Gallium3D is versioned, the Xorg state tracker needs to be compiled
with Gallium, but it's really beneficial to be able to compile xorg drivers
standalone.
Therefore the xa state tracker is intended to supply the following
functionality:
1) Versioning.
2) Surface functionality (creation and copying for a basic dri2 implementation)
3) YUV blits for textured Xv.
and coming up:
4) Solid fills with ROP functionality.
5) Copies with ROP functionality, format conversion and reinterpretation.
6) Xrender- type compositing for general acceleration.
4-6 are not implemented yet since they are not directly used by the
vmwgfx driver.
The first user will be the vmwgfx xorg driver. When there are more users,
we need to be able to load the appropriate gallium pipe driver, and we
should investigate sharing the loading mechanism with the EGL state tracker.

View File

@ -0,0 +1,3 @@
#
# Reformat the given source files with the XA coding style:
# Linux style, 4-space indents, and return types on their own line (-psl).
# Use "$@" rather than $* so filenames containing spaces survive
# word splitting.
indent --linux-style -i4 -ip4 -bad -bap -psl "$@"

View File

@ -0,0 +1,258 @@
/**********************************************************
* Copyright 2009-2011 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*********************************************************
* Authors:
* Zack Rusin <zackr-at-vmware-dot-com>
* Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include "xa_context.h"
#include "xa_priv.h"
#include "cso_cache/cso_context.h"
#include "util/u_inlines.h"
#include "util/u_rect.h"
#include "pipe/p_context.h"
/*
 * Return the context that was created when the tracker was initialized.
 */
struct xa_context *
xa_context_default(struct xa_tracker *xa)
{
    struct xa_context *ctx = xa->default_ctx;

    return ctx;
}
/*
 * Create a new rendering context on the tracker's screen.
 *
 * Returns NULL on allocation or pipe/CSO/shader-cache creation failure;
 * the original code dereferenced the calloc/context_create results
 * unchecked, crashing on failure.  Partially constructed state is
 * unwound before returning.
 */
struct xa_context *
xa_context_create(struct xa_tracker *xa)
{
    struct xa_context *ctx = calloc(1, sizeof(*ctx));

    if (!ctx)
	return NULL;

    ctx->xa = xa;
    ctx->pipe = xa->screen->context_create(xa->screen, NULL);
    if (!ctx->pipe)
	goto out_no_pipe;

    ctx->cso = cso_create_context(ctx->pipe);
    if (!ctx->cso)
	goto out_no_cso;

    ctx->shaders = xa_shaders_create(ctx);
    if (!ctx->shaders)
	goto out_no_shaders;

    renderer_init_state(ctx);
    return ctx;

 out_no_shaders:
    cso_destroy_context(ctx->cso);
 out_no_cso:
    ctx->pipe->destroy(ctx->pipe);
 out_no_pipe:
    free(ctx);
    return NULL;
}
void
xa_context_destroy(struct xa_context *r)
{
struct pipe_resource **vsbuf = &r->vs_const_buffer;
struct pipe_resource **fsbuf = &r->fs_const_buffer;
if (*vsbuf)
pipe_resource_reference(vsbuf, NULL);
if (*fsbuf)
pipe_resource_reference(fsbuf, NULL);
if (r->shaders) {
xa_shaders_destroy(r->shaders);
r->shaders = NULL;
}
if (r->cso) {
cso_release_all(r->cso);
cso_destroy_context(r->cso);
r->cso = NULL;
}
}
/*
 * Transfer pixel data between srf and the client buffer at data
 * (pitch bytes per row), one transfer per clip box.  A non-zero
 * to_surface uploads into the surface; zero reads back from it.
 *
 * Returns XA_ERR_NONE on success, -XA_ERR_NORES if a transfer could
 * not be created or mapped.  On failure, boxes processed so far have
 * already been transferred.
 */
int
xa_surface_dma(struct xa_context *ctx,
	       struct xa_surface *srf,
	       void *data,
	       unsigned int pitch,
	       int to_surface, struct xa_box *boxes, unsigned int num_boxes)
{
    struct pipe_transfer *transfer;
    void *map;
    int w, h, i;
    enum pipe_transfer_usage transfer_direction;
    struct pipe_context *pipe = ctx->pipe;

    transfer_direction = (to_surface ? PIPE_TRANSFER_WRITE :
			  PIPE_TRANSFER_READ);

    for (i = 0; i < num_boxes; ++i, ++boxes) {
	w = boxes->x2 - boxes->x1;
	h = boxes->y2 - boxes->y1;

	/* One transfer window per box, covering exactly that rectangle. */
	transfer = pipe_get_transfer(pipe, srf->tex, 0, 0,
				     transfer_direction, boxes->x1, boxes->y1,
				     w, h);
	if (!transfer)
	    return -XA_ERR_NORES;

	map = pipe_transfer_map(ctx->pipe, transfer);
	if (!map)
	    goto out_no_map;

	if (to_surface) {
	    /* The transfer map's origin is the box's origin, so the
	     * destination offset is (0, 0); the source offset indexes
	     * into the client buffer. */
	    util_copy_rect(map, srf->tex->format, transfer->stride,
			   0, 0, w, h, data, pitch, boxes->x1, boxes->y1);
	} else {
	    util_copy_rect(data, srf->tex->format, pitch,
			   boxes->x1, boxes->y1, w, h, map, transfer->stride, 0,
			   0);
	}
	pipe->transfer_unmap(pipe, transfer);
	pipe->transfer_destroy(pipe, transfer);
	/* Flush uploads so the data is visible to subsequent rendering;
	 * this also refreshes ctx->last_fence. */
	if (to_surface)
	    pipe->flush(pipe, &ctx->last_fence);
    }
    return XA_ERR_NONE;
 out_no_map:
    pipe->transfer_destroy(pipe, transfer);
    return -XA_ERR_NORES;
}
/*
 * Map the whole surface for CPU access according to the XA_MAP_READ /
 * XA_MAP_WRITE bits in usage.  Returns the mapped pointer, or NULL if
 * the surface is already mapped, usage requests neither direction, or
 * the transfer cannot be created or mapped.
 *
 * Fix over the original: on pipe_transfer_map() failure the destroyed
 * transfer was left in srf->transfer (so a later xa_surface_unmap()
 * would use a freed transfer and every later map would fail), and
 * mapping_pipe was set even though nothing was mapped.  We now clear
 * srf->transfer and return early.
 */
void *
xa_surface_map(struct xa_context *ctx,
	       struct xa_surface *srf, unsigned int usage)
{
    void *map;
    unsigned int transfer_direction = 0;
    struct pipe_context *pipe = ctx->pipe;

    /* Only one outstanding map per surface. */
    if (srf->transfer)
	return NULL;

    if (usage & XA_MAP_READ)
	transfer_direction = PIPE_TRANSFER_READ;
    if (usage & XA_MAP_WRITE)
	transfer_direction = PIPE_TRANSFER_WRITE;

    if (!transfer_direction)
	return NULL;

    srf->transfer = pipe_get_transfer(pipe, srf->tex, 0, 0,
				      transfer_direction, 0, 0,
				      srf->tex->width0, srf->tex->height0);
    if (!srf->transfer)
	return NULL;

    map = pipe_transfer_map(pipe, srf->transfer);
    if (!map) {
	pipe->transfer_destroy(pipe, srf->transfer);
	srf->transfer = NULL;
	return NULL;
    }

    /* Remember which context mapped us so unmap can use the same one. */
    srf->mapping_pipe = pipe;
    return map;
}
/*
 * Undo a successful xa_surface_map(); a no-op if the surface is not
 * currently mapped.
 */
void
xa_surface_unmap(struct xa_surface *srf)
{
    struct pipe_transfer *transfer = srf->transfer;
    struct pipe_context *pipe;

    if (!transfer)
	return;

    /* Unmap on the same context that created the mapping. */
    pipe = srf->mapping_pipe;
    pipe->transfer_unmap(pipe, transfer);
    pipe->transfer_destroy(pipe, transfer);
    srf->transfer = NULL;
}
/*
 * Latch source and destination for a series of xa_copy() calls.
 * Copies must be between two distinct surfaces of identical format;
 * returns -XA_ERR_INVAL otherwise, 0 on success.
 */
int
xa_copy_prepare(struct xa_context *ctx,
		struct xa_surface *dst, struct xa_surface *src)
{
    if (src == dst)
	return -XA_ERR_INVAL;
    if (src->tex->format != dst->tex->format)
	return -XA_ERR_INVAL;

    ctx->dst = dst;
    ctx->src = src;
    return 0;
}
void
xa_copy(struct xa_context *ctx,
int dx, int dy, int sx, int sy, int width, int height)
{
struct pipe_box src_box;
u_box_2d(sx, sy, width, height, &src_box);
ctx->pipe->resource_copy_region(ctx->pipe,
ctx->dst->tex, 0, dx, dy, 0, ctx->src->tex,
0, &src_box);
}
void
xa_copy_done(struct xa_context *ctx)
{
ctx->pipe->flush(ctx->pipe, &ctx->last_fence);
}
/*
 * Snapshot the fence of the most recent flush on this context.
 * Returns NULL on allocation failure; the returned fence may wrap a
 * NULL pipe fence if nothing has been flushed yet.
 *
 * Fix over the original: fence comes from malloc, so fence->pipe_fence
 * held garbage when handed to screen->fence_reference(), which
 * unreferences the old value of its destination pointer before
 * installing the new one.  Initialize it to NULL first.
 */
struct xa_fence *
xa_fence_get(struct xa_context *ctx)
{
    struct xa_fence *fence = malloc(sizeof(*fence));
    struct pipe_screen *screen = ctx->xa->screen;

    if (!fence)
	return NULL;

    fence->xa = ctx->xa;
    fence->pipe_fence = NULL;

    if (ctx->last_fence != NULL)
	screen->fence_reference(screen, &fence->pipe_fence, ctx->last_fence);

    return fence;
}
/*
 * Wait up to timeout for the fence to signal.  A NULL fence or a fence
 * with no pipe fence is trivially signaled.  Returns XA_ERR_NONE when
 * signaled (releasing the pipe fence), -XA_ERR_BUSY on timeout.
 */
int
xa_fence_wait(struct xa_fence *fence, uint64_t timeout)
{
    struct pipe_screen *screen;

    if (!fence || !fence->pipe_fence)
	return XA_ERR_NONE;

    screen = fence->xa->screen;
    if (!screen->fence_finish(screen, fence->pipe_fence, timeout))
	return -XA_ERR_BUSY;

    /* Signaled: drop our reference so repeated waits are cheap. */
    screen->fence_reference(screen, &fence->pipe_fence, NULL);
    return XA_ERR_NONE;
}
/*
 * Release a fence obtained from xa_fence_get().  Destroying NULL is a
 * no-op, mirroring free() semantics.
 */
void
xa_fence_destroy(struct xa_fence *fence)
{
    if (fence) {
	if (fence->pipe_fence) {
	    struct pipe_screen *screen = fence->xa->screen;

	    screen->fence_reference(screen, &fence->pipe_fence, NULL);
	}
	free(fence);
    }
}

View File

@ -0,0 +1,77 @@
/**********************************************************
* Copyright 2009-2011 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*********************************************************
* Authors:
* Zack Rusin <zackr-at-vmware-dot-com>
* Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#ifndef _XA_CONTEXT_H_
#define _XA_CONTEXT_H_
#include "xa_tracker.h"
#include <stdint.h>

/* Opaque rendering-context handle; one pipe context plus CSO/shader state. */
struct xa_context;

/* Return the context created at xa_tracker initialization. */
extern struct xa_context *xa_context_default(struct xa_tracker *xa);

/* Create / destroy an additional rendering context on the same tracker. */
extern struct xa_context *xa_context_create(struct xa_tracker *xa);
extern void xa_context_destroy(struct xa_context *r);

/*
 * Blit the planar YUV surfaces yuv[] into dst, scaling from the src
 * rectangle to the dst rectangle, clipped to the given boxes, using
 * conversion_matrix for the color-space transform.
 */
extern int xa_yuv_planar_blit(struct xa_context *r,
			      int src_x,
			      int src_y,
			      int src_w,
			      int src_h,
			      int dst_x,
			      int dst_y,
			      int dst_w,
			      int dst_h,
			      struct xa_box *box,
			      unsigned int num_boxes,
			      const float conversion_matrix[],
			      struct xa_surface *dst, struct xa_surface *yuv[]);

/* Surface-to-surface copies: prepare once, copy N rectangles, then done. */
extern int xa_copy_prepare(struct xa_context *ctx,
			   struct xa_surface *dst, struct xa_surface *src);

extern void xa_copy(struct xa_context *ctx,
		    int dx, int dy, int sx, int sy, int width, int height);

extern void xa_copy_done(struct xa_context *ctx);

/*
 * Transfer pixel data between srf and the client buffer at data
 * (byte_pitch bytes per row), restricted to the given boxes.
 * to_surface selects upload (non-zero) vs. readback.
 */
extern int xa_surface_dma(struct xa_context *ctx,
			  struct xa_surface *srf,
			  void *data,
			  unsigned int byte_pitch,
			  int to_surface, struct xa_box *boxes,
			  unsigned int num_boxes);

/* Fences: snapshot the last flush, wait on it, release it. */
extern struct xa_fence *xa_fence_get(struct xa_context *ctx);

extern int xa_fence_wait(struct xa_fence *fence, uint64_t timeout);

extern void xa_fence_destroy(struct xa_fence *fence);
#endif

View File

@ -0,0 +1,179 @@
/**********************************************************
* Copyright 2009-2011 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*********************************************************
* Authors:
* Zack Rusin <zackr-at-vmware-dot-com>
* Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#ifndef _XA_PRIV_H_
#define _XA_PRIV_H_

#include "xa_tracker.h"
#include "xa_context.h"
#include "pipe/p_screen.h"
#include "pipe/p_context.h"
#include "pipe/p_state.h"

/* Vertex-buffer capacity in floats: room for 100 quads of 4 vertices
 * with up to 3 four-float attributes each. */
#define XA_VB_SIZE (100 * 4 * 3 * 4)
#define XA_LAST_SURFACE_TYPE (xa_type_yuv_component + 1)

/* Wraps a pipe fence together with the tracker that owns the screen. */
struct xa_fence {
    struct pipe_fence_handle *pipe_fence;
    struct xa_tracker *xa;
};

/* Pairing of a gallium format with the XA format it backs. */
struct xa_format_descriptor {
    enum pipe_format format;
    enum xa_formats xa_format;
};

struct xa_surface {
    struct pipe_resource template;
    struct xa_tracker *xa;
    struct pipe_resource *tex;
    struct pipe_surface *srf;
    struct pipe_sampler_view *view;
    unsigned int flags;
    struct xa_format_descriptor fdesc;
    /* Active map state; transfer is non-NULL only while mapped. */
    struct pipe_transfer *transfer;
    struct pipe_context *mapping_pipe;
};

struct xa_tracker {
    enum xa_formats *supported_formats;
    unsigned int format_map[XA_LAST_SURFACE_TYPE][2];
    int d_depth_bits_last;
    int ds_depth_bits_last;
    struct pipe_screen *screen;
    struct xa_context *default_ctx;
};

struct xa_context {
    struct xa_tracker *xa;
    struct pipe_context *pipe;

    struct cso_context *cso;
    struct xa_shaders *shaders;

    /* Per-stage constant buffers, see renderer_set_constants(). */
    struct pipe_resource *vs_const_buffer;
    struct pipe_resource *fs_const_buffer;

    /* CPU-side vertex staging area, flushed by renderer_draw(). */
    float buffer[XA_VB_SIZE];
    unsigned int buffer_size;
    struct pipe_vertex_element velems[3];

    /* number of attributes per vertex for the current
     * draw operation */
    unsigned int attrs_per_vertex;

    /* Cached destination dimensions, see renderer_bind_destination(). */
    unsigned int fb_width;
    unsigned int fb_height;

    /* Fence from the most recent flush; snapshotted by xa_fence_get(). */
    struct pipe_fence_handle *last_fence;
    struct xa_surface *src;
    struct xa_surface *dst;
};

/* Vertex-shader feature bits used to key the shader cache. */
enum xa_vs_traits {
    VS_COMPOSITE = 1 << 0,
    VS_MASK = 1 << 1,
    VS_SOLID_FILL = 1 << 2,
    VS_LINGRAD_FILL = 1 << 3,
    VS_RADGRAD_FILL = 1 << 4,
    VS_YUV = 1 << 5,

    VS_FILL = (VS_SOLID_FILL | VS_LINGRAD_FILL | VS_RADGRAD_FILL)
};

/* Fragment-shader feature bits used to key the shader cache. */
enum xa_fs_traits {
    FS_COMPOSITE = 1 << 0,
    FS_MASK = 1 << 1,
    FS_SOLID_FILL = 1 << 2,
    FS_LINGRAD_FILL = 1 << 3,
    FS_RADGRAD_FILL = 1 << 4,
    FS_CA_FULL = 1 << 5,	/* src.rgba * mask.rgba */
    FS_CA_SRCALPHA = 1 << 6,	/* src.aaaa * mask.rgba */
    FS_YUV = 1 << 7,
    FS_SRC_REPEAT_NONE = 1 << 8,
    FS_MASK_REPEAT_NONE = 1 << 9,
    FS_SRC_SWIZZLE_RGB = 1 << 10,
    FS_MASK_SWIZZLE_RGB = 1 << 11,
    FS_SRC_SET_ALPHA = 1 << 12,
    FS_MASK_SET_ALPHA = 1 << 13,
    FS_SRC_LUMINANCE = 1 << 14,
    FS_MASK_LUMINANCE = 1 << 15,

    FS_FILL = (FS_SOLID_FILL | FS_LINGRAD_FILL | FS_RADGRAD_FILL),
    FS_COMPONENT_ALPHA = (FS_CA_FULL | FS_CA_SRCALPHA)
};

/* A compiled vertex/fragment shader pair (CSO handles). */
struct xa_shader {
    void *fs;
    void *vs;
};
/* Return the smaller of two ints (either operand when they are equal). */
static inline int
xa_min(int a, int b)
{
    if (a < b)
	return a;
    return b;
}
/* Opaque shader cache, see xa_tgsi.c. */
struct xa_shaders;

/*
 * xa_tgsi.c
 */
/* Create/destroy the shader cache; look up (or build) a shader pair
 * for the given vertex/fragment trait bits. */
extern struct xa_shaders *xa_shaders_create(struct xa_context *);

void xa_shaders_destroy(struct xa_shaders *shaders);

struct xa_shader xa_shaders_get(struct xa_shaders *shaders,
				unsigned vs_traits, unsigned fs_traits);

/*
 * xa_renderer.c
 */
/* Upload a constant buffer for the given shader stage. */
void renderer_set_constants(struct xa_context *r,
			    int shader_type, const float *params,
			    int param_bytes);

/* Draw one textured quad sampling the YUV surfaces in srf[]. */
void renderer_draw_yuv(struct xa_context *r,
		       float src_x,
		       float src_y,
		       float src_w,
		       float src_h,
		       int dst_x,
		       int dst_y, int dst_w, int dst_h,
		       struct xa_surface *srf[]);

/* Bind framebuffer/viewport/VS constants for the given render target. */
void renderer_bind_destination(struct xa_context *r,
			       struct pipe_surface *surface, int width,
			       int height);

/* One-time CSO state setup for a freshly created context. */
void renderer_init_state(struct xa_context *r);
#endif

View File

@ -0,0 +1,470 @@
/**********************************************************
* Copyright 2009-2011 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*********************************************************
* Authors:
* Zack Rusin <zackr-at-vmware-dot-com>
*/
#include "xa_context.h"
#include "xa_priv.h"
#include <math.h>
#include "cso_cache/cso_context.h"
#include "util/u_inlines.h"
#include "util/u_sampler.h"
#include "util/u_draw_quad.h"
#define floatsEqual(x, y) (fabs(x - y) <= 0.00001f * MIN2(fabs(x), fabs(y)))
#define floatIsZero(x) (floatsEqual((x) + 1, 1))
#define NUM_COMPONENTS 4
void
renderer_set_constants(struct xa_context *r,
int shader_type, const float *params, int param_bytes);
/* A 3x3 matrix (column-major, as indexed by map_point) is affine iff
 * its projective row is (0, 0, 1). */
static INLINE boolean
is_affine(float *matrix)
{
    if (!floatIsZero(matrix[2]))
	return 0;
    if (!floatIsZero(matrix[5]))
	return 0;
    return floatsEqual(matrix[8], 1);
}
/*
 * Transform (x, y) by the 3x3 matrix mat into (*out_x, *out_y),
 * applying the perspective divide when the matrix is not affine.
 * A NULL matrix is the identity.
 */
static INLINE void
map_point(float *mat, float x, float y, float *out_x, float *out_y)
{
    float tx, ty;

    if (!mat) {
	*out_x = x;
	*out_y = y;
	return;
    }

    tx = mat[0] * x + mat[3] * y + mat[6];
    ty = mat[1] * x + mat[4] * y + mat[7];

    if (!is_affine(mat)) {
	float w = 1 / (mat[2] * x + mat[5] * y + mat[8]);

	tx *= w;
	ty *= w;
    }

    *out_x = tx;
    *out_y = ty;
}
/*
 * Wrap the context's staged vertex data in a user buffer suitable for
 * binding as a vertex buffer, and reset the staging area for the next
 * batch.
 */
static INLINE struct pipe_resource *
renderer_buffer_create(struct xa_context *r)
{
    struct pipe_resource *buf;

    buf = pipe_user_buffer_create(r->pipe->screen, r->buffer,
				  sizeof(float) * r->buffer_size,
				  PIPE_BIND_VERTEX_BUFFER);

    /* The staged vertices have been handed off; start a fresh batch. */
    r->buffer_size = 0;

    return buf;
}
/*
 * Submit all vertices staged in r->buffer as quads, using the
 * currently bound shaders and r->attrs_per_vertex attributes per
 * vertex.  A no-op when nothing is staged.
 */
static INLINE void
renderer_draw(struct xa_context *r)
{
    struct pipe_context *pipe = r->pipe;
    struct pipe_resource *vbuf;
    int num_verts;

    if (r->buffer_size == 0)
	return;

    /* Compute the vertex count before the staging area is reset. */
    num_verts = r->buffer_size / (r->attrs_per_vertex * NUM_COMPONENTS);

    vbuf = renderer_buffer_create(r);
    if (!vbuf)
	return;

    cso_set_vertex_elements(r->cso, r->attrs_per_vertex, r->velems);
    util_draw_vertex_buffer(pipe, r->cso, vbuf, 0, PIPE_PRIM_QUADS,
			    num_verts, r->attrs_per_vertex);
    pipe_resource_reference(&vbuf, NULL);
}
/*
 * Flush staged vertices if adding next_batch more floats would
 * overflow the staging buffer, or unconditionally when next_batch is
 * zero (an explicit flush request) and something is staged.
 */
static INLINE void
renderer_draw_conditional(struct xa_context *r, int next_batch)
{
    int overflow = r->buffer_size + next_batch >= XA_VB_SIZE;
    int explicit_flush = next_batch == 0 && r->buffer_size;

    if (overflow || explicit_flush)
	renderer_draw(r);
}
/*
 * One-time CSO state setup for a freshly created context: disable
 * depth/stencil/alpha, select GL rasterization rules, and describe the
 * vertex layout (up to three 4-float attributes from one buffer).
 */
void
renderer_init_state(struct xa_context *r)
{
    struct pipe_depth_stencil_alpha_state dsa;
    struct pipe_rasterizer_state raster;
    unsigned i;

    /* Depth/stencil/alpha tests all disabled (zero-initialized state). */
    memset(&dsa, 0, sizeof(struct pipe_depth_stencil_alpha_state));
    cso_set_depth_stencil_alpha(r->cso, &dsa);

    /* Default rasterizer state, but with GL rasterization rules. */
    memset(&raster, 0, sizeof(struct pipe_rasterizer_state));
    raster.gl_rasterization_rules = 1;
    cso_set_rasterizer(r->cso, &raster);

    /* vertex elements state: attribute i lives at float offset i*4 in
     * the interleaved vertex, always from vertex buffer 0. */
    memset(&r->velems[0], 0, sizeof(r->velems[0]) * 3);
    for (i = 0; i < 3; i++) {
	r->velems[i].src_offset = i * 4 * sizeof(float);
	r->velems[i].instance_divisor = 0;
	r->velems[i].vertex_buffer_index = 0;
	r->velems[i].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
    }
}
/* Stage one vertex carrying a position and an rgba color (8 floats). */
static INLINE void
add_vertex_color(struct xa_context *r, float x, float y, float color[4])
{
    float *v = r->buffer + r->buffer_size;

    /* Position: (x, y, 0, 1). */
    *v++ = x;
    *v++ = y;
    *v++ = 0.f;
    *v++ = 1.f;

    /* Color: rgba. */
    *v++ = color[0];
    *v++ = color[1];
    *v++ = color[2];
    *v++ = color[3];

    r->buffer_size += 8;
}
/* Stage one vertex carrying a position and one texcoord (8 floats). */
static INLINE void
add_vertex_1tex(struct xa_context *r, float x, float y, float s, float t)
{
    float *v = r->buffer + r->buffer_size;

    /* Position: (x, y, 0, 1). */
    *v++ = x;
    *v++ = y;
    *v++ = 0.f;
    *v++ = 1.f;

    /* Texcoord: (s, t, 0, 1). */
    *v++ = s;
    *v++ = t;
    *v++ = 0.f;
    *v++ = 1.f;

    r->buffer_size += 8;
}
/* Stage one vertex carrying a position and two texcoords (12 floats). */
static INLINE void
add_vertex_2tex(struct xa_context *r,
		float x, float y, float s0, float t0, float s1, float t1)
{
    float *v = r->buffer + r->buffer_size;

    /* Position: (x, y, 0, 1). */
    *v++ = x;
    *v++ = y;
    *v++ = 0.f;
    *v++ = 1.f;

    /* First texcoord: (s0, t0, 0, 1). */
    *v++ = s0;
    *v++ = t0;
    *v++ = 0.f;
    *v++ = 1.f;

    /* Second texcoord: (s1, t1, 0, 1). */
    *v++ = s1;
    *v++ = t1;
    *v++ = 0.f;
    *v++ = 1.f;

    r->buffer_size += 12;
}
/*
 * Stage one destination quad whose texcoords are the source rectangle
 * normalized against the first YUV plane's dimensions, and wrap the
 * result in a vertex buffer.
 */
static struct pipe_resource *
setup_vertex_data_yuv(struct xa_context *r,
		      float srcX,
		      float srcY,
		      float srcW,
		      float srcH,
		      float dstX,
		      float dstY,
		      float dstW, float dstH, struct xa_surface *srf[])
{
    struct pipe_resource *tex = srf[0]->tex;
    float s0 = srcX / tex->width0;
    float t0 = srcY / tex->height0;
    float s1 = (srcX + srcW) / tex->width0;
    float t1 = (srcY + srcH) / tex->height0;

    /* Quad corners, clockwise from the top-left. */
    add_vertex_1tex(r, dstX, dstY, s0, t0);
    add_vertex_1tex(r, dstX + dstW, dstY, s1, t0);
    add_vertex_1tex(r, dstX + dstW, dstY + dstH, s1, t1);
    add_vertex_1tex(r, dstX, dstY + dstH, s0, t1);

    return renderer_buffer_create(r);
}
/* Set up framebuffer, viewport and vertex shader constant buffer
 * state for a particular destination surface. In all our rendering,
 * these concepts are linked.
 */
void
renderer_bind_destination(struct xa_context *r,
			  struct pipe_surface *surface, int width, int height)
{
    struct pipe_framebuffer_state fb;
    struct pipe_viewport_state viewport;

    /* Framebuffer uses actual surface width/height
     */
    memset(&fb, 0, sizeof fb);
    fb.width = surface->width;
    fb.height = surface->height;
    fb.nr_cbufs = 1;
    fb.cbufs[0] = surface;
    fb.zsbuf = 0;

    /* Viewport just touches the bit we're interested in:
     * scale/translate map NDC [-1,1] onto the width x height rectangle.
     */
    viewport.scale[0] = width / 2.f;
    viewport.scale[1] = height / 2.f;
    viewport.scale[2] = 1.0;
    viewport.scale[3] = 1.0;
    viewport.translate[0] = width / 2.f;
    viewport.translate[1] = height / 2.f;
    viewport.translate[2] = 0.0;
    viewport.translate[3] = 0.0;

    /* Constant buffer set up to match viewport dimensions; only
     * re-uploaded when the destination size actually changes.
     */
    if (r->fb_width != width || r->fb_height != height) {
	float vs_consts[8] = {
	    2.f / width, 2.f / height, 1, 1,
	    -1, -1, 0, 0
	};

	r->fb_width = width;
	r->fb_height = height;

	renderer_set_constants(r, PIPE_SHADER_VERTEX,
			       vs_consts, sizeof vs_consts);
    }

    cso_set_framebuffer(r->cso, &fb);
    cso_set_viewport(r->cso, &viewport);
}
/*
 * Replace the constant buffer for the given shader stage with a fresh
 * buffer holding param_bytes of data from params, and bind it at
 * constant-buffer slot 0.
 */
void
renderer_set_constants(struct xa_context *r,
		       int shader_type, const float *params, int param_bytes)
{
    struct pipe_resource **cbuf;

    /* Pick the per-stage constant buffer slot. */
    if (shader_type == PIPE_SHADER_VERTEX)
	cbuf = &r->vs_const_buffer;
    else
	cbuf = &r->fs_const_buffer;

    /* Drop the stale buffer, then allocate and fill the new one. */
    pipe_resource_reference(cbuf, NULL);
    *cbuf = pipe_buffer_create(r->pipe->screen,
			       PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STATIC,
			       param_bytes);
    if (*cbuf)
	pipe_buffer_write(r->pipe, *cbuf, 0, param_bytes, params);

    r->pipe->set_constant_buffer(r->pipe, shader_type, 0, *cbuf);
}
/*
 * Bind all state needed for a series of renderer_copy_pixmap() calls:
 * straight source-copy blending, nearest-filtered clamped sampling of
 * src_texture, dst_surface as the render target, and the plain
 * composite shader pair.  Also resets the vertex staging area to
 * two attributes per vertex (position + texcoord).
 */
void
renderer_copy_prepare(struct xa_context *r,
		      struct pipe_surface *dst_surface,
		      struct pipe_resource *src_texture)
{
    struct pipe_context *pipe = r->pipe;
    struct pipe_screen *screen = pipe->screen;
    struct xa_shader shader;

    assert(screen->is_format_supported(screen, dst_surface->format,
				       PIPE_TEXTURE_2D, 0,
				       PIPE_BIND_RENDER_TARGET));
    /* Silence the unused warning when assert() compiles away. */
    (void)screen;

    /* set misc state we care about: src-copy blend (ONE/ZERO). */
    {
	struct pipe_blend_state blend;

	memset(&blend, 0, sizeof(blend));
	blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
	blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
	blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
	blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
	blend.rt[0].colormask = PIPE_MASK_RGBA;
	cso_set_blend(r->cso, &blend);
    }

    /* sampler: nearest filtering, clamp-to-edge, normalized coords. */
    {
	struct pipe_sampler_state sampler;

	memset(&sampler, 0, sizeof(sampler));
	sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
	sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
	sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
	sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
	sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
	sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
	sampler.normalized_coords = 1;
	cso_single_sampler(r->cso, 0, &sampler);
	cso_single_sampler_done(r->cso);
    }

    renderer_bind_destination(r, dst_surface,
			      dst_surface->width, dst_surface->height);

    /* texture/sampler view: default view of the source texture; the
     * CSO holds its own reference, so drop ours right away. */
    {
	struct pipe_sampler_view templ;
	struct pipe_sampler_view *src_view;

	u_sampler_view_default_template(&templ,
					src_texture, src_texture->format);
	src_view = pipe->create_sampler_view(pipe, src_texture, &templ);
	cso_set_fragment_sampler_views(r->cso, 1, &src_view);
	pipe_sampler_view_reference(&src_view, NULL);
    }

    /* shaders */
    shader = xa_shaders_get(r->shaders, VS_COMPOSITE, FS_COMPOSITE);
    cso_set_vertex_shader_handle(r->cso, shader.vs);
    cso_set_fragment_shader_handle(r->cso, shader.fs);

    /* Start a fresh vertex batch: position + texcoord per vertex. */
    r->buffer_size = 0;
    r->attrs_per_vertex = 2;
}
/*
 * Stage one textured quad copying a width x height rectangle from
 * (sx, sy) in the bound source texture (whose dimensions are
 * src_width x src_height) to (dx, dy) on the bound destination.
 * Requires a prior renderer_copy_prepare().
 */
void
renderer_copy_pixmap(struct xa_context *r,
		     int dx,
		     int dy,
		     int sx,
		     int sy,
		     int width, int height, float src_width, float src_height)
{
    /* XXX: could put the texcoord scaling calculation into the vertex
     * shader.
     */
    float s0 = sx / src_width;
    float s1 = (sx + width) / src_width;
    float t0 = sy / src_height;
    float t1 = (sy + height) / src_height;

    float x0 = dx;
    float x1 = dx + width;
    float y0 = dy;
    float y1 = dy + height;

    /* Flush first if this quad (4 verts x 8 floats) would overflow. */
    renderer_draw_conditional(r, 4 * 8);

    add_vertex_1tex(r, x0, y0, s0, t0);
    add_vertex_1tex(r, x1, y0, s1, t0);
    add_vertex_1tex(r, x1, y1, s1, t1);
    add_vertex_1tex(r, x0, y1, s0, t1);
}
/*
 * Draw one quad sampling the YUV planes in srf[], scaling the source
 * rectangle onto the destination rectangle.  Shaders and samplers are
 * assumed to be bound already.
 */
void
renderer_draw_yuv(struct xa_context *r,
		  float src_x,
		  float src_y,
		  float src_w,
		  float src_h,
		  int dst_x,
		  int dst_y, int dst_w, int dst_h, struct xa_surface *srf[])
{
    const int num_attribs = 2;	/* position + texcoord per vertex */
    struct pipe_context *pipe = r->pipe;
    struct pipe_resource *vbuf;

    vbuf = setup_vertex_data_yuv(r,
				 src_x, src_y, src_w, src_h,
				 dst_x, dst_y, dst_w, dst_h, srf);
    if (!vbuf)
	return;

    cso_set_vertex_elements(r->cso, num_attribs, r->velems);
    util_draw_vertex_buffer(pipe, r->cso, vbuf, 0, PIPE_PRIM_QUADS,
			    4 /* verts */, num_attribs);
    pipe_resource_reference(&vbuf, NULL);
}
/*
 * Start a batch of solid-fill quads: two attributes per vertex
 * (position + color) and an empty staging buffer.
 */
void
renderer_begin_solid(struct xa_context *r)
{
    r->attrs_per_vertex = 2;
    r->buffer_size = 0;
}
/*
 * Stage one solid-colored quad covering the rectangle (x0, y0)-(x1, y1).
 * Requires a prior renderer_begin_solid().
 */
void
renderer_solid(struct xa_context *r,
	       int x0, int y0, int x1, int y1, float *color)
{
    /* Flush first if this quad (4 verts x 8 floats) would overflow. */
    renderer_draw_conditional(r, 4 * 8);

    add_vertex_color(r, x0, y0, color);
    add_vertex_color(r, x1, y0, color);
    add_vertex_color(r, x1, y1, color);
    add_vertex_color(r, x0, y1, color);
}
/*
 * Force out any vertices still sitting in the staging buffer; a
 * next_batch of zero makes renderer_draw_conditional() flush
 * unconditionally when anything is staged.
 */
void
renderer_draw_flush(struct xa_context *r)
{
    renderer_draw_conditional(r, 0);
}

View File

@ -0,0 +1,20 @@
xa_tracker_version
xa_tracker_create
xa_tracker_destroy
xa_surface_create
xa_surface_destroy
xa_surface_redefine
xa_surface_dma
xa_surface_map
xa_surface_unmap
xa_copy_prepare
xa_copy
xa_copy_done
xa_surface_handle
xa_context_default
xa_context_create
xa_context_destroy
xa_fence_get
xa_fence_wait
xa_fence_destroy
xa_yuv_planar_blit

View File

@ -0,0 +1,651 @@
/**********************************************************
* Copyright 2009-2011 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*********************************************************
* Authors:
* Zack Rusin <zackr-at-vmware-dot-com>
*/
#include "xa_priv.h"
#include "pipe/p_format.h"
#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "pipe/p_shader_tokens.h"
#include "util/u_memory.h"
#include "tgsi/tgsi_ureg.h"
#include "cso_cache/cso_context.h"
#include "cso_cache/cso_hash.h"
/* Vertex shader:
* IN[0] = vertex pos
* IN[1] = src tex coord | solid fill color
* IN[2] = mask tex coord
* IN[3] = dst tex coord
* CONST[0] = (2/dst_width, 2/dst_height, 1, 1)
* CONST[1] = (-1, -1, 0, 0)
*
* OUT[0] = vertex pos
* OUT[1] = src tex coord | solid fill color
* OUT[2] = mask tex coord
* OUT[3] = dst tex coord
*/
/* Fragment shader:
* SAMP[0] = src
* SAMP[1] = mask
* SAMP[2] = dst
* IN[0] = pos src | solid fill color
* IN[1] = pos mask
* IN[2] = pos dst
* CONST[0] = (0, 0, 0, 1)
*
* OUT[0] = color
*/
/*
 * Debug helper: print the symbolic names of all fragment-shader trait
 * bits set in fs_traits.  Only compiled in via the #if 0 block in
 * create_fs().
 */
static void
print_fs_traits(int fs_traits)
{
    const char *strings[] = {
        "FS_COMPOSITE",         /* = 1 << 0, */
        "FS_MASK",              /* = 1 << 1, */
        "FS_SOLID_FILL",        /* = 1 << 2, */
        "FS_LINGRAD_FILL",      /* = 1 << 3, */
        "FS_RADGRAD_FILL",      /* = 1 << 4, */
        "FS_CA_FULL",           /* = 1 << 5, *//* src.rgba * mask.rgba */
        "FS_CA_SRCALPHA",       /* = 1 << 6, *//* src.aaaa * mask.rgba */
        "FS_YUV",               /* = 1 << 7, */
        "FS_SRC_REPEAT_NONE",   /* = 1 << 8, */
        "FS_MASK_REPEAT_NONE",  /* = 1 << 9, */
        "FS_SRC_SWIZZLE_RGB",   /* = 1 << 10, */
        "FS_MASK_SWIZZLE_RGB",  /* = 1 << 11, */
        "FS_SRC_SET_ALPHA",     /* = 1 << 12, */
        "FS_MASK_SET_ALPHA",    /* = 1 << 13, */
        "FS_SRC_LUMINANCE",     /* = 1 << 14, */
        "FS_MASK_LUMINANCE",    /* = 1 << 15, */
    };
    unsigned int bit;

    debug_printf("%s: ", __func__);
    /* Walk the 16 known trait bits and name each one that is set. */
    for (bit = 0; bit < 16; bit++) {
        if (fs_traits & (1 << bit))
            debug_printf("%s, ", strings[bit]);
    }
    debug_printf("\n");
}
/*
 * Per-context cache of compiled shaders, keyed by their trait bitmasks.
 * Populated lazily by xa_shaders_get() / shader_from_cache().
 */
struct xa_shaders {
    struct xa_context *r;       /* owning context; supplies pipe and cso */
    struct cso_hash *vs_hash;   /* vs_traits -> vertex shader CSO */
    struct cso_hash *fs_hash;   /* fs_traits -> fragment shader CSO */
};
/*
 * Emit the Render "src IN mask" operation into dst.
 *
 * FS_CA_FULL multiplies all four channels of src by mask (component
 * alpha), FS_CA_SRCALPHA multiplies replicated src alpha by mask, and
 * otherwise src is scaled by a single mask channel: X for luminance
 * masks, W (alpha) for everything else.
 */
static INLINE void
src_in_mask(struct ureg_program *ureg,
            struct ureg_dst dst,
            struct ureg_src src,
            struct ureg_src mask,
            unsigned component_alpha, unsigned mask_luminance)
{
    switch (component_alpha) {
    case FS_CA_FULL:
        /* src.rgba * mask.rgba */
        ureg_MUL(ureg, dst, src, mask);
        break;
    case FS_CA_SRCALPHA:
        /* src.aaaa * mask.rgba */
        ureg_MUL(ureg, dst, ureg_scalar(src, TGSI_SWIZZLE_W), mask);
        break;
    default:
        /* Scalar mask: luminance masks carry coverage in X, others in W. */
        ureg_MUL(ureg, dst, src,
                 ureg_scalar(mask, mask_luminance ? TGSI_SWIZZLE_X
                             : TGSI_SWIZZLE_W));
        break;
    }
}
static struct ureg_src
vs_normalize_coords(struct ureg_program *ureg,
struct ureg_src coords,
struct ureg_src const0, struct ureg_src const1)
{
struct ureg_dst tmp = ureg_DECL_temporary(ureg);
struct ureg_src ret;
ureg_MAD(ureg, tmp, coords, const0, const1);
ret = ureg_src(tmp);
ureg_release_temporary(ureg, tmp);
return ret;
}
/*
 * Emit TGSI code evaluating a linear gradient at fragment position
 * "pos" and writing the sampled gradient color to "out".
 *
 * The position is first extended to (x, y, 1) and transformed by the
 * 3x3 matrix given as rows matrow0..matrow2 (with perspective divide
 * via RCP of the third row's dot product).  The transformed point is
 * then projected onto the gradient vector packed in "coords"
 * (presumably (p0, p1, scale) — TODO confirm against the CPU-side
 * setup) and used as a 1D texture coordinate into the gradient ramp
 * bound at "sampler".  const0124 supplies scalar constants; only its
 * Y component (the homogeneous 1) is used here.
 *
 * Fix over the original: the unused temporary temp5 (declared and
 * released but never referenced) has been removed.
 */
static void
linear_gradient(struct ureg_program *ureg,
                struct ureg_dst out,
                struct ureg_src pos,
                struct ureg_src sampler,
                struct ureg_src coords,
                struct ureg_src const0124,
                struct ureg_src matrow0,
                struct ureg_src matrow1, struct ureg_src matrow2)
{
    struct ureg_dst temp0 = ureg_DECL_temporary(ureg);
    struct ureg_dst temp1 = ureg_DECL_temporary(ureg);
    struct ureg_dst temp2 = ureg_DECL_temporary(ureg);
    struct ureg_dst temp3 = ureg_DECL_temporary(ureg);
    struct ureg_dst temp4 = ureg_DECL_temporary(ureg);

    /* temp0 = (pos.x, pos.y, 1) -- homogeneous fragment position. */
    ureg_MOV(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_XY), pos);
    ureg_MOV(ureg,
             ureg_writemask(temp0, TGSI_WRITEMASK_Z),
             ureg_scalar(const0124, TGSI_SWIZZLE_Y));

    /* Matrix transform with perspective divide: (x', y') = M * p / w. */
    ureg_DP3(ureg, temp1, matrow0, ureg_src(temp0));
    ureg_DP3(ureg, temp2, matrow1, ureg_src(temp0));
    ureg_DP3(ureg, temp3, matrow2, ureg_src(temp0));
    ureg_RCP(ureg, temp3, ureg_src(temp3));
    ureg_MUL(ureg, temp1, ureg_src(temp1), ureg_src(temp3));
    ureg_MUL(ureg, temp2, ureg_src(temp2), ureg_src(temp3));

    /* temp4 = (x', y'). */
    ureg_MOV(ureg, ureg_writemask(temp4, TGSI_WRITEMASK_X), ureg_src(temp1));
    ureg_MOV(ureg, ureg_writemask(temp4, TGSI_WRITEMASK_Y), ureg_src(temp2));

    /* Gradient parameter: (coords.x * x' + coords.y * y') * coords.z. */
    ureg_MUL(ureg, temp0,
             ureg_scalar(coords, TGSI_SWIZZLE_Y),
             ureg_scalar(ureg_src(temp4), TGSI_SWIZZLE_Y));
    ureg_MAD(ureg, temp1,
             ureg_scalar(coords, TGSI_SWIZZLE_X),
             ureg_scalar(ureg_src(temp4), TGSI_SWIZZLE_X), ureg_src(temp0));
    ureg_MUL(ureg, temp2, ureg_src(temp1), ureg_scalar(coords, TGSI_SWIZZLE_Z));

    /* Look up the gradient ramp. */
    ureg_TEX(ureg, out, TGSI_TEXTURE_1D, ureg_src(temp2), sampler);

    ureg_release_temporary(ureg, temp0);
    ureg_release_temporary(ureg, temp1);
    ureg_release_temporary(ureg, temp2);
    ureg_release_temporary(ureg, temp3);
    ureg_release_temporary(ureg, temp4);
}
/*
 * Emit TGSI code evaluating a radial gradient at fragment position
 * "pos" and writing the sampled gradient color to "out".
 *
 * As in linear_gradient(), pos is extended to (x, y, 1), transformed by
 * the 3x3 matrix in matrow0..matrow2 with a perspective divide, and the
 * resulting point is fed through a quadratic (solved with the standard
 * quadratic formula: the SUB/RSQ/RCP sequence computes the square root
 * of the discriminant) to obtain the 1D texture coordinate into the
 * gradient ramp bound at "sampler".  The exact packing of the circle
 * parameters in "coords" is established by the CPU-side constant setup
 * (not visible here) — presumably center/radius terms; TODO confirm.
 * const0124 supplies scalar constants (Y = 1 used for the homogeneous
 * coordinate, W = 4 used in the discriminant term).
 */
static void
radial_gradient(struct ureg_program *ureg,
                struct ureg_dst out,
                struct ureg_src pos,
                struct ureg_src sampler,
                struct ureg_src coords,
                struct ureg_src const0124,
                struct ureg_src matrow0,
                struct ureg_src matrow1, struct ureg_src matrow2)
{
    struct ureg_dst temp0 = ureg_DECL_temporary(ureg);
    struct ureg_dst temp1 = ureg_DECL_temporary(ureg);
    struct ureg_dst temp2 = ureg_DECL_temporary(ureg);
    struct ureg_dst temp3 = ureg_DECL_temporary(ureg);
    struct ureg_dst temp4 = ureg_DECL_temporary(ureg);
    struct ureg_dst temp5 = ureg_DECL_temporary(ureg);

    /* temp0 = (pos.x, pos.y, 1) -- homogeneous fragment position. */
    ureg_MOV(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_XY), pos);
    ureg_MOV(ureg,
             ureg_writemask(temp0, TGSI_WRITEMASK_Z),
             ureg_scalar(const0124, TGSI_SWIZZLE_Y));

    /* Matrix transform with perspective divide. */
    ureg_DP3(ureg, temp1, matrow0, ureg_src(temp0));
    ureg_DP3(ureg, temp2, matrow1, ureg_src(temp0));
    ureg_DP3(ureg, temp3, matrow2, ureg_src(temp0));
    ureg_RCP(ureg, temp3, ureg_src(temp3));
    ureg_MUL(ureg, temp1, ureg_src(temp1), ureg_src(temp3));
    ureg_MUL(ureg, temp2, ureg_src(temp2), ureg_src(temp3));

    /* temp5 = transformed point (x', y'). */
    ureg_MOV(ureg, ureg_writemask(temp5, TGSI_WRITEMASK_X), ureg_src(temp1));
    ureg_MOV(ureg, ureg_writemask(temp5, TGSI_WRITEMASK_Y), ureg_src(temp2));

    /* Linear term b = 2 * (coords.x * x' + coords.y * y'). */
    ureg_MUL(ureg, temp0, ureg_scalar(coords, TGSI_SWIZZLE_Y),
             ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_Y));
    ureg_MAD(ureg, temp1,
             ureg_scalar(coords, TGSI_SWIZZLE_X),
             ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_X), ureg_src(temp0));
    ureg_ADD(ureg, temp1, ureg_src(temp1), ureg_src(temp1));

    /* temp4 = -(x'^2 + y'^2). */
    ureg_MUL(ureg, temp3,
             ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_Y),
             ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_Y));
    ureg_MAD(ureg, temp4,
             ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_X),
             ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_X), ureg_src(temp3));
    ureg_MOV(ureg, temp4, ureg_negate(ureg_src(temp4)));

    /* Discriminant term: 4 * coords.z * temp4. */
    ureg_MUL(ureg, temp2, ureg_scalar(coords, TGSI_SWIZZLE_Z), ureg_src(temp4));
    ureg_MUL(ureg, temp0,
             ureg_scalar(const0124, TGSI_SWIZZLE_W), ureg_src(temp2));
    ureg_MUL(ureg, temp3, ureg_src(temp1), ureg_src(temp1));
    ureg_SUB(ureg, temp2, ureg_src(temp3), ureg_src(temp0));

    /* sqrt(|discriminant|) via RSQ + RCP. */
    ureg_RSQ(ureg, temp2, ureg_abs(ureg_src(temp2)));
    ureg_RCP(ureg, temp2, ureg_src(temp2));

    /* t = (sqrt(disc) - b) / (2 * coords.z). */
    ureg_SUB(ureg, temp1, ureg_src(temp2), ureg_src(temp1));
    ureg_ADD(ureg, temp0,
             ureg_scalar(coords, TGSI_SWIZZLE_Z),
             ureg_scalar(coords, TGSI_SWIZZLE_Z));
    ureg_RCP(ureg, temp0, ureg_src(temp0));
    ureg_MUL(ureg, temp2, ureg_src(temp1), ureg_src(temp0));

    /* Look up the gradient ramp. */
    ureg_TEX(ureg, out, TGSI_TEXTURE_1D, ureg_src(temp2), sampler);

    ureg_release_temporary(ureg, temp0);
    ureg_release_temporary(ureg, temp1);
    ureg_release_temporary(ureg, temp2);
    ureg_release_temporary(ureg, temp3);
    ureg_release_temporary(ureg, temp4);
    ureg_release_temporary(ureg, temp5);
}
/*
 * Build the vertex shader matching the given vs_traits bitmask.
 *
 * Input slot 0 is always the vertex position, normalized to clip space
 * via vs_normalize_coords().  Depending on traits, further input slots
 * are passed straight through as outputs:
 *   VS_YUV       -> GENERIC[0] texture coordinate
 *   VS_COMPOSITE -> GENERIC[0] source texture coordinate
 *   VS_FILL      -> COLOR[0]   solid fill color
 *   VS_MASK      -> GENERIC[1] mask texture coordinate
 *
 * Returns an opaque shader CSO created by
 * ureg_create_shader_and_destroy(), or NULL on failure.
 */
static void *
create_vs(struct pipe_context *pipe, unsigned vs_traits)
{
    struct ureg_program *ureg;
    struct ureg_src src;
    struct ureg_dst dst;
    struct ureg_src const0, const1;
    boolean is_fill = (vs_traits & VS_FILL) != 0;
    boolean is_composite = (vs_traits & VS_COMPOSITE) != 0;
    boolean has_mask = (vs_traits & VS_MASK) != 0;
    boolean is_yuv = (vs_traits & VS_YUV) != 0;
    unsigned input_slot = 0;

    ureg = ureg_create(TGSI_PROCESSOR_VERTEX);
    if (ureg == NULL)
        return NULL;            /* was "return 0" -- NULL is the idiomatic
                                 * pointer failure value */

    const0 = ureg_DECL_constant(ureg, 0);
    const1 = ureg_DECL_constant(ureg, 1);

    /* it has to be either a fill or a composite op */
    debug_assert((is_fill ^ is_composite) ^ is_yuv);

    /* Position: normalize to clip space and emit. */
    src = ureg_DECL_vs_input(ureg, input_slot++);
    dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
    src = vs_normalize_coords(ureg, src, const0, const1);
    ureg_MOV(ureg, dst, src);

    if (is_yuv) {
        src = ureg_DECL_vs_input(ureg, input_slot++);
        dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0);
        ureg_MOV(ureg, dst, src);
    }

    if (is_composite) {
        src = ureg_DECL_vs_input(ureg, input_slot++);
        dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0);
        ureg_MOV(ureg, dst, src);
    }

    if (is_fill) {
        src = ureg_DECL_vs_input(ureg, input_slot++);
        dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0);
        ureg_MOV(ureg, dst, src);
    }

    if (has_mask) {
        src = ureg_DECL_vs_input(ureg, input_slot++);
        dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 1);
        ureg_MOV(ureg, dst, src);
    }

    ureg_END(ureg);

    return ureg_create_shader_and_destroy(ureg, pipe);
}
/*
 * Build the YUV -> RGB conversion fragment shader.
 *
 * Takes ownership of "ureg" (it is consumed by
 * ureg_create_shader_and_destroy).  Samples the three planes from
 * samplers 0..2 at the GENERIC[0] coordinate, recenters U and V by the
 * offset stored in matrow0.w, then converts with
 *   rgb = y * matrow0 + u * matrow1 + v * matrow2
 * and forces alpha to matrow0.x (expected to be 1 per the "rgb.a = 1"
 * comment below — the constants themselves are set up by the caller).
 * Returns the shader CSO, or NULL on failure.
 */
static void *
create_yuv_shader(struct pipe_context *pipe, struct ureg_program *ureg)
{
    struct ureg_src y_sampler, u_sampler, v_sampler;
    struct ureg_src pos;
    struct ureg_src matrow0, matrow1, matrow2;
    struct ureg_dst y, u, v, rgb;
    struct ureg_dst out = ureg_DECL_output(ureg,
                                           TGSI_SEMANTIC_COLOR,
                                           0);

    pos = ureg_DECL_fs_input(ureg,
                             TGSI_SEMANTIC_GENERIC, 0,
                             TGSI_INTERPOLATE_PERSPECTIVE);

    rgb = ureg_DECL_temporary(ureg);
    y = ureg_DECL_temporary(ureg);
    u = ureg_DECL_temporary(ureg);
    v = ureg_DECL_temporary(ureg);

    /* One sampler per plane. */
    y_sampler = ureg_DECL_sampler(ureg, 0);
    u_sampler = ureg_DECL_sampler(ureg, 1);
    v_sampler = ureg_DECL_sampler(ureg, 2);

    /* Conversion matrix rows, uploaded by the caller. */
    matrow0 = ureg_DECL_constant(ureg, 0);
    matrow1 = ureg_DECL_constant(ureg, 1);
    matrow2 = ureg_DECL_constant(ureg, 2);

    ureg_TEX(ureg, y, TGSI_TEXTURE_2D, pos, y_sampler);
    ureg_TEX(ureg, u, TGSI_TEXTURE_2D, pos, u_sampler);
    ureg_TEX(ureg, v, TGSI_TEXTURE_2D, pos, v_sampler);

    /* Recenter chroma: u -= offset, v -= offset (offset in matrow0.w). */
    ureg_SUB(ureg, u, ureg_src(u), ureg_scalar(matrow0, TGSI_SWIZZLE_W));
    ureg_SUB(ureg, v, ureg_src(v), ureg_scalar(matrow0, TGSI_SWIZZLE_W));

    /* rgb = y*row0 + u*row1 + v*row2 */
    ureg_MUL(ureg, rgb, ureg_scalar(ureg_src(y), TGSI_SWIZZLE_X), matrow0);
    ureg_MAD(ureg, rgb,
             ureg_scalar(ureg_src(u), TGSI_SWIZZLE_X), matrow1, ureg_src(rgb));
    ureg_MAD(ureg, rgb,
             ureg_scalar(ureg_src(v), TGSI_SWIZZLE_X), matrow2, ureg_src(rgb));

    /* rgb.a = 1; */
    ureg_MOV(ureg, ureg_writemask(rgb, TGSI_WRITEMASK_W),
             ureg_scalar(matrow0, TGSI_SWIZZLE_X));

    ureg_MOV(ureg, out, ureg_src(rgb));

    ureg_release_temporary(ureg, rgb);
    ureg_release_temporary(ureg, y);
    ureg_release_temporary(ureg, u);
    ureg_release_temporary(ureg, v);

    ureg_END(ureg);

    return ureg_create_shader_and_destroy(ureg, pipe);
}
/*
 * Emit a Render-style texture fetch into dst.
 *
 * repeat_none: emulate RepeatNone by computing an in-bounds factor
 *     (1 inside [0,1)x[0,1), 0 outside, via SGT/SLT/MIN) and multiplying
 *     the texel by it, so out-of-bounds samples become transparent.
 * swizzle:     swap the R and B channels of the fetched texel.
 * set_alpha:   force the texel alpha to imm0.w (expected 1, set up by
 *     the caller as ureg_imm4f(..., 0, 0, 0, 1)).
 */
static INLINE void
xrender_tex(struct ureg_program *ureg,
            struct ureg_dst dst,
            struct ureg_src coords,
            struct ureg_src sampler,
            struct ureg_src imm0,
            boolean repeat_none, boolean swizzle, boolean set_alpha)
{
    if (repeat_none) {
        struct ureg_dst tmp0 = ureg_DECL_temporary(ureg);
        struct ureg_dst tmp1 = ureg_DECL_temporary(ureg);

        /* tmp1 = (coord.xyxy > 0), tmp0 = (coord.xyxy < 1). */
        ureg_SGT(ureg, tmp1, ureg_swizzle(coords,
                                          TGSI_SWIZZLE_X,
                                          TGSI_SWIZZLE_Y,
                                          TGSI_SWIZZLE_X,
                                          TGSI_SWIZZLE_Y), ureg_scalar(imm0,
                                                                       TGSI_SWIZZLE_X));
        ureg_SLT(ureg, tmp0,
                 ureg_swizzle(coords, TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y,
                              TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y), ureg_scalar(imm0,
                                                                           TGSI_SWIZZLE_W));
        /* Collapse the four comparisons into a single 0/1 factor. */
        ureg_MIN(ureg, tmp0, ureg_src(tmp0), ureg_src(tmp1));
        ureg_MIN(ureg, tmp0, ureg_scalar(ureg_src(tmp0), TGSI_SWIZZLE_X),
                 ureg_scalar(ureg_src(tmp0), TGSI_SWIZZLE_Y));
        ureg_TEX(ureg, tmp1, TGSI_TEXTURE_2D, coords, sampler);
        if (swizzle)
            /* Swap R and B. */
            ureg_MOV(ureg, tmp1, ureg_swizzle(ureg_src(tmp1),
                                              TGSI_SWIZZLE_Z,
                                              TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X,
                                              TGSI_SWIZZLE_W));
        if (set_alpha)
            ureg_MOV(ureg,
                     ureg_writemask(tmp1, TGSI_WRITEMASK_W),
                     ureg_scalar(imm0, TGSI_SWIZZLE_W));
        /* Zero the texel when outside the texture bounds. */
        ureg_MUL(ureg, dst, ureg_src(tmp1), ureg_src(tmp0));
        ureg_release_temporary(ureg, tmp0);
        ureg_release_temporary(ureg, tmp1);
    } else {
        if (swizzle) {
            struct ureg_dst tmp = ureg_DECL_temporary(ureg);

            ureg_TEX(ureg, tmp, TGSI_TEXTURE_2D, coords, sampler);
            /* Swap R and B. */
            ureg_MOV(ureg, dst, ureg_swizzle(ureg_src(tmp),
                                             TGSI_SWIZZLE_Z,
                                             TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X,
                                             TGSI_SWIZZLE_W));
            ureg_release_temporary(ureg, tmp);
        } else {
            ureg_TEX(ureg, dst, TGSI_TEXTURE_2D, coords, sampler);
        }
        if (set_alpha)
            ureg_MOV(ureg,
                     ureg_writemask(dst, TGSI_WRITEMASK_W),
                     ureg_scalar(imm0, TGSI_SWIZZLE_W));
    }
}
/*
 * Build the fragment shader matching the given fs_traits bitmask.
 *
 * Exactly one of fill / composite / yuv must be requested (asserted
 * below).  The source color comes from a texture (composite), an
 * interpolated COLOR input (solid fill) or a gradient evaluation
 * (linear/radial fill); it is optionally converted to luminance, then
 * combined with an optional mask via src_in_mask().  YUV conversion is
 * delegated entirely to create_yuv_shader().
 *
 * FS_FILL and FS_COMPONENT_ALPHA are composite masks defined in
 * xa_priv.h (not visible here) — presumably the union of the fill bits
 * and of the CA bits respectively; TODO confirm.
 *
 * Returns the shader CSO, or NULL (written as 0) on failure.
 */
static void *
create_fs(struct pipe_context *pipe, unsigned fs_traits)
{
    struct ureg_program *ureg;
    struct ureg_src /*dst_sampler, */ src_sampler, mask_sampler;
    struct ureg_src /*dst_pos, */ src_input, mask_pos;
    struct ureg_dst src, mask;
    struct ureg_dst out;
    struct ureg_src imm0 = { 0 };
    unsigned has_mask = (fs_traits & FS_MASK) != 0;
    unsigned is_fill = (fs_traits & FS_FILL) != 0;
    unsigned is_composite = (fs_traits & FS_COMPOSITE) != 0;
    unsigned is_solid = (fs_traits & FS_SOLID_FILL) != 0;
    unsigned is_lingrad = (fs_traits & FS_LINGRAD_FILL) != 0;
    unsigned is_radgrad = (fs_traits & FS_RADGRAD_FILL) != 0;
    unsigned comp_alpha_mask = fs_traits & FS_COMPONENT_ALPHA;
    unsigned is_yuv = (fs_traits & FS_YUV) != 0;
    unsigned src_repeat_none = (fs_traits & FS_SRC_REPEAT_NONE) != 0;
    unsigned mask_repeat_none = (fs_traits & FS_MASK_REPEAT_NONE) != 0;
    unsigned src_swizzle = (fs_traits & FS_SRC_SWIZZLE_RGB) != 0;
    unsigned mask_swizzle = (fs_traits & FS_MASK_SWIZZLE_RGB) != 0;
    unsigned src_set_alpha = (fs_traits & FS_SRC_SET_ALPHA) != 0;
    unsigned mask_set_alpha = (fs_traits & FS_MASK_SET_ALPHA) != 0;
    unsigned src_luminance = (fs_traits & FS_SRC_LUMINANCE) != 0;
    unsigned mask_luminance = (fs_traits & FS_MASK_LUMINANCE) != 0;

#if 0
    print_fs_traits(fs_traits);
#else
    /* Keep the debug helper referenced so it doesn't warn as unused. */
    (void)print_fs_traits;
#endif

    ureg = ureg_create(TGSI_PROCESSOR_FRAGMENT);
    if (ureg == NULL)
        return 0;

    /* it has to be either a fill, a composite op or a yuv conversion */
    debug_assert((is_fill ^ is_composite) ^ is_yuv);
    (void)is_yuv;

    out = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0);

    /* imm0 = (0, 0, 0, 1): bounds constants for RepeatNone emulation,
     * the forced alpha value, and the luminance broadcast source.
     */
    if (src_repeat_none || mask_repeat_none ||
        src_set_alpha || mask_set_alpha || src_luminance) {
        imm0 = ureg_imm4f(ureg, 0, 0, 0, 1);
    }
    /* Declare the source input: texcoord, solid color or position. */
    if (is_composite) {
        src_sampler = ureg_DECL_sampler(ureg, 0);
        src_input = ureg_DECL_fs_input(ureg,
                                       TGSI_SEMANTIC_GENERIC, 0,
                                       TGSI_INTERPOLATE_PERSPECTIVE);
    } else if (is_fill) {
        if (is_solid)
            src_input = ureg_DECL_fs_input(ureg,
                                           TGSI_SEMANTIC_COLOR, 0,
                                           TGSI_INTERPOLATE_PERSPECTIVE);
        else
            src_input = ureg_DECL_fs_input(ureg,
                                           TGSI_SEMANTIC_POSITION, 0,
                                           TGSI_INTERPOLATE_PERSPECTIVE);
    } else {
        /* YUV conversion: hand the program off entirely. */
        debug_assert(is_yuv);
        return create_yuv_shader(pipe, ureg);
    }

    if (has_mask) {
        mask_sampler = ureg_DECL_sampler(ureg, 1);
        mask_pos = ureg_DECL_fs_input(ureg,
                                      TGSI_SEMANTIC_GENERIC, 1,
                                      TGSI_INTERPOLATE_PERSPECTIVE);
    }
#if 0				/* unused right now */
    dst_sampler = ureg_DECL_sampler(ureg, 2);
    dst_pos = ureg_DECL_fs_input(ureg,
                                 TGSI_SEMANTIC_POSITION, 2,
                                 TGSI_INTERPOLATE_PERSPECTIVE);
#endif

    /* Compute the source color.  When further processing (mask or
     * luminance conversion) follows, write it to a temporary; otherwise
     * straight to the output.
     */
    if (is_composite) {
        if (has_mask || src_luminance)
            src = ureg_DECL_temporary(ureg);
        else
            src = out;
        xrender_tex(ureg, src, src_input, src_sampler, imm0,
                    src_repeat_none, src_swizzle, src_set_alpha);
    } else if (is_fill) {
        if (is_solid) {
            if (has_mask || src_luminance)
                src = ureg_dst(src_input);
            else
                ureg_MOV(ureg, out, src_input);
        } else if (is_lingrad || is_radgrad) {
            struct ureg_src coords, const0124, matrow0, matrow1, matrow2;

            if (has_mask || src_luminance)
                src = ureg_DECL_temporary(ureg);
            else
                src = out;

            coords = ureg_DECL_constant(ureg, 0);
            const0124 = ureg_DECL_constant(ureg, 1);
            matrow0 = ureg_DECL_constant(ureg, 2);
            matrow1 = ureg_DECL_constant(ureg, 3);
            matrow2 = ureg_DECL_constant(ureg, 4);

            if (is_lingrad) {
                linear_gradient(ureg, src,
                                src_input, src_sampler,
                                coords, const0124, matrow0, matrow1, matrow2);
            } else if (is_radgrad) {
                radial_gradient(ureg, src,
                                src_input, src_sampler,
                                coords, const0124, matrow0, matrow1, matrow2);
            }
        } else
            debug_assert(!"Unknown fill type!");
    }
    /* Luminance source: broadcast the X channel into alpha, zero rgb. */
    if (src_luminance) {
        ureg_MOV(ureg, src, ureg_scalar(ureg_src(src), TGSI_SWIZZLE_X));
        ureg_MOV(ureg, ureg_writemask(src, TGSI_WRITEMASK_XYZ),
                 ureg_scalar(imm0, TGSI_SWIZZLE_X));
        if (!has_mask)
            ureg_MOV(ureg, out, ureg_src(src));
    }
    if (has_mask) {
        mask = ureg_DECL_temporary(ureg);
        xrender_tex(ureg, mask, mask_pos, mask_sampler, imm0,
                    mask_repeat_none, mask_swizzle, mask_set_alpha);
        /* src IN mask */
        src_in_mask(ureg, out, ureg_src(src), ureg_src(mask),
                    comp_alpha_mask, mask_luminance);
        ureg_release_temporary(ureg, mask);
    }

    ureg_END(ureg);

    return ureg_create_shader_and_destroy(ureg, pipe);
}
struct xa_shaders *
xa_shaders_create(struct xa_context *r)
{
struct xa_shaders *sc = CALLOC_STRUCT(xa_shaders);
sc->r = r;
sc->vs_hash = cso_hash_create();
sc->fs_hash = cso_hash_create();
return sc;
}
static void
cache_destroy(struct cso_context *cso,
struct cso_hash *hash, unsigned processor)
{
struct cso_hash_iter iter = cso_hash_first_node(hash);
while (!cso_hash_iter_is_null(iter)) {
void *shader = (void *)cso_hash_iter_data(iter);
if (processor == PIPE_SHADER_FRAGMENT) {
cso_delete_fragment_shader(cso, shader);
} else if (processor == PIPE_SHADER_VERTEX) {
cso_delete_vertex_shader(cso, shader);
}
iter = cso_hash_erase(hash, iter);
}
cso_hash_delete(hash);
}
/*
 * Tear down a shader cache: delete every cached vertex and fragment
 * shader CSO, then free the cache object itself.
 */
void
xa_shaders_destroy(struct xa_shaders *sc)
{
    cache_destroy(sc->r->cso, sc->fs_hash, PIPE_SHADER_FRAGMENT);
    cache_destroy(sc->r->cso, sc->vs_hash, PIPE_SHADER_VERTEX);
    FREE(sc);
}
static INLINE void *
shader_from_cache(struct pipe_context *pipe,
unsigned type, struct cso_hash *hash, unsigned key)
{
void *shader = 0;
struct cso_hash_iter iter = cso_hash_find(hash, key);
if (cso_hash_iter_is_null(iter)) {
if (type == PIPE_SHADER_VERTEX)
shader = create_vs(pipe, key);
else
shader = create_fs(pipe, key);
cso_hash_insert(hash, key, shader);
} else
shader = (void *)cso_hash_iter_data(iter);
return shader;
}
struct xa_shader
xa_shaders_get(struct xa_shaders *sc, unsigned vs_traits, unsigned fs_traits)
{
struct xa_shader shader = { NULL, NULL };
void *vs, *fs;
vs = shader_from_cache(sc->r->pipe, PIPE_SHADER_VERTEX,
sc->vs_hash, vs_traits);
fs = shader_from_cache(sc->r->pipe, PIPE_SHADER_FRAGMENT,
sc->fs_hash, fs_traits);
debug_assert(vs && fs);
if (!vs || !fs)
return shader;
shader.vs = vs;
shader.fs = fs;
return shader;
}

View File

@ -0,0 +1,420 @@
/**********************************************************
* Copyright 2009-2011 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*********************************************************
* Authors:
* Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include "xa_tracker.h"
#include "xa_priv.h"
#include "pipe/p_state.h"
#include "pipe/p_format.h"
#include "state_tracker/drm_driver.h"
#include "util/u_inlines.h"
/*
 * format_map [xa_surface_type][first..last in list].
 * Needs to be updated when enum xa_formats is updated.
 *
 * For each surface type, the corresponding preferred_* array lists the
 * candidate formats in order of preference; xa_tracker_create() probes
 * each against the screen and records the supported range in
 * xa->format_map.
 */
static const enum xa_formats preferred_a[] = { xa_format_a8 };

static const enum xa_formats preferred_argb[] =
    { xa_format_a8r8g8b8, xa_format_x8r8g8b8, xa_format_r5g6b5,
    xa_format_x1r5g5b5
};
static const enum xa_formats preferred_z[] =
    { xa_format_z32, xa_format_z24, xa_format_z16 };
static const enum xa_formats preferred_sz[] =
    { xa_format_x8z24, xa_format_s8z24 };
static const enum xa_formats preferred_zs[] =
    { xa_format_z24x8, xa_format_z24s8 };
static const enum xa_formats preferred_yuv[] = { xa_format_yuv8 };

/* Indexed by enum xa_surface_type; NULL for types with no candidates
 * (xa_type_other, xa_type_abgr, xa_type_bgra).
 */
static const enum xa_formats *preferred[] =
    { NULL, preferred_a, preferred_argb, NULL, NULL,
    preferred_z, preferred_zs, preferred_sz, preferred_yuv
};

/* Element counts matching preferred[], same indexing. */
static const unsigned int num_preferred[] = { 0,
    sizeof(preferred_a) / sizeof(enum xa_formats),
    sizeof(preferred_argb) / sizeof(enum xa_formats),
    0,
    0,
    sizeof(preferred_z) / sizeof(enum xa_formats),
    sizeof(preferred_zs) / sizeof(enum xa_formats),
    sizeof(preferred_sz) / sizeof(enum xa_formats),
    sizeof(preferred_yuv) / sizeof(enum xa_formats)
};

/* Pipe bind flags to request per surface type: depth/stencil types bind
 * as depth-stencil, everything else as a sampler view.
 */
static const unsigned int stype_bind[XA_LAST_SURFACE_TYPE] = { 0,
    PIPE_BIND_SAMPLER_VIEW,
    PIPE_BIND_SAMPLER_VIEW,
    PIPE_BIND_SAMPLER_VIEW,
    PIPE_BIND_SAMPLER_VIEW,
    PIPE_BIND_DEPTH_STENCIL,
    PIPE_BIND_DEPTH_STENCIL,
    PIPE_BIND_DEPTH_STENCIL,
    PIPE_BIND_SAMPLER_VIEW
};
/*
 * Translate an XA format into a descriptor pairing it with the
 * corresponding gallium pipe format.  On an unrecognized input the
 * descriptor's xa_format member is set to xa_format_unknown and the
 * pipe format member is left unset — callers must test xa_format
 * before using it.
 */
static struct xa_format_descriptor
xa_get_pipe_format(enum xa_formats xa_format)
{
    struct xa_format_descriptor fdesc;

    fdesc.xa_format = xa_format;

    switch (xa_format) {
    case xa_format_a8:
    case xa_format_yuv8:
        /* Both single-component 8-bit formats map to luminance. */
        fdesc.format = PIPE_FORMAT_L8_UNORM;
        break;
    case xa_format_a8r8g8b8:
        fdesc.format = PIPE_FORMAT_B8G8R8A8_UNORM;
        break;
    case xa_format_x8r8g8b8:
        fdesc.format = PIPE_FORMAT_B8G8R8X8_UNORM;
        break;
    case xa_format_r5g6b5:
        fdesc.format = PIPE_FORMAT_B5G6R5_UNORM;
        break;
    case xa_format_x1r5g5b5:
        fdesc.format = PIPE_FORMAT_B5G5R5A1_UNORM;
        break;
    case xa_format_z16:
        fdesc.format = PIPE_FORMAT_Z16_UNORM;
        break;
    case xa_format_z32:
        fdesc.format = PIPE_FORMAT_Z32_UNORM;
        break;
    case xa_format_z24:
    case xa_format_x8z24:
        /* z24 and x8z24 share the same pipe representation. */
        fdesc.format = PIPE_FORMAT_Z24X8_UNORM;
        break;
    case xa_format_z24x8:
        fdesc.format = PIPE_FORMAT_X8Z24_UNORM;
        break;
    case xa_format_s8z24:
        fdesc.format = PIPE_FORMAT_Z24_UNORM_S8_USCALED;
        break;
    case xa_format_z24s8:
        fdesc.format = PIPE_FORMAT_S8_USCALED_Z24_UNORM;
        break;
    default:
        fdesc.xa_format = xa_format_unknown;
        break;
    }
    return fdesc;
}
/*
 * Create an XA tracker for the DRM device "drm_fd".
 *
 * Creates the gallium screen and a default context, then probes every
 * preferred format (see the tables above) against the screen and builds
 * xa->supported_formats plus xa->format_map, where format_map[stype]
 * holds the [first, last] index range of supported formats for that
 * surface type.  Slot 0 of supported_formats is reserved for
 * xa_format_unknown.  Returns NULL on failure, unwinding via the goto
 * labels in reverse construction order.
 */
struct xa_tracker *
xa_tracker_create(int drm_fd)
{
    struct xa_tracker *xa = calloc(1, sizeof(struct xa_tracker));
    enum xa_surface_type stype;
    unsigned int num_formats;

    if (!xa)
        return NULL;

    xa->screen = driver_descriptor.create_screen(drm_fd);
    if (!xa->screen)
        goto out_no_screen;

    xa->default_ctx = xa_context_create(xa);
    if (!xa->default_ctx)
        goto out_no_pipe;

    /* Upper bound on the format list: all candidates plus the
     * reserved "unknown" slot.
     */
    num_formats = 0;
    for (stype = 0; stype < XA_LAST_SURFACE_TYPE; ++stype)
        num_formats += num_preferred[stype];

    num_formats += 1;
    xa->supported_formats = calloc(num_formats, sizeof(*xa->supported_formats));
    if (!xa->supported_formats)
        goto out_sf_alloc_fail;

    xa->supported_formats[0] = xa_format_unknown;
    num_formats = 1;
    memset(xa->format_map, 0, sizeof(xa->format_map));

    for (stype = 0; stype < XA_LAST_SURFACE_TYPE; ++stype) {
        unsigned int bind = stype_bind[stype];
        enum xa_formats xa_format;
        int i;

        for (i = 0; i < num_preferred[stype]; ++i) {
            xa_format = preferred[stype][i];

            struct xa_format_descriptor fdesc = xa_get_pipe_format(xa_format);

            /* Record only formats the screen actually supports; the
             * map stores the first and last index for this stype.
             */
            if (xa->screen->is_format_supported(xa->screen, fdesc.format,
                                                PIPE_TEXTURE_2D, 0, bind)) {
                if (xa->format_map[stype][0] == 0)
                    xa->format_map[stype][0] = num_formats;
                xa->format_map[stype][1] = num_formats;
                xa->supported_formats[num_formats++] = xa_format;
            }
        }
    }
    return xa;

 out_sf_alloc_fail:
    xa_context_destroy(xa->default_ctx);
 out_no_pipe:
    xa->screen->destroy(xa->screen);
 out_no_screen:
    free(xa);
    return NULL;
}
/*
 * Destroy a tracker created with xa_tracker_create(), releasing the
 * format list, the default context and the screen, in that order
 * (context before screen, since the context belongs to the screen).
 */
void
xa_tracker_destroy(struct xa_tracker *xa)
{
    free(xa->supported_formats);
    xa_context_destroy(xa->default_ctx);
    xa->screen->destroy(xa->screen);
    free(xa);
}
/*
 * Decide whether a surface created with old_flags can be reused as-is
 * for a request with new_flags (1 = compatible, 0 = must recreate).
 *
 * Identical flags are always compatible; a change in XA_FLAG_SHARED
 * never is.  Dropping XA_FLAG_RENDER_TARGET is tolerated (the existing
 * surface is simply over-capable), while adding it forces recreation.
 * Any other flag difference is treated as incompatible.
 */
static int
xa_flags_compat(unsigned int old_flags, unsigned int new_flags)
{
    unsigned int changed = old_flags ^ new_flags;

    if (!changed)
        return 1;

    if (changed & XA_FLAG_SHARED)
        return 0;

    if (changed & XA_FLAG_RENDER_TARGET)
        return !(new_flags & XA_FLAG_RENDER_TARGET);

    /* Unknown / unimplemented flags: always recreate. */
    return 0;
}
/*
 * Find a supported format of surface type "stype" whose depth matches
 * "depth", scanning the index range recorded in xa->format_map.
 * On failure the returned descriptor has xa_format == xa_format_unknown
 * (and an unspecified pipe format, as in the original).
 */
static struct xa_format_descriptor
xa_get_format_stype_depth(struct xa_tracker *xa,
                          enum xa_surface_type stype, unsigned int depth)
{
    struct xa_format_descriptor fdesc;
    unsigned int i;

    for (i = xa->format_map[stype][0]; i <= xa->format_map[stype][1]; ++i) {
        fdesc = xa_get_pipe_format(xa->supported_formats[i]);
        if (fdesc.xa_format != xa_format_unknown &&
            xa_format_depth(fdesc.xa_format) == depth)
            return fdesc;
    }

    fdesc.xa_format = xa_format_unknown;
    return fdesc;
}
/*
 * Create an XA surface backed by a 2D pipe resource.
 *
 * If xa_format is xa_format_unknown, a supported format is chosen from
 * (stype, depth); otherwise the given format is translated directly.
 * The resource bind flags come from stype_bind[], augmented with
 * PIPE_BIND_SHARED / PIPE_BIND_RENDER_TARGET according to "flags".
 * Returns NULL on unknown format, allocation failure or resource
 * creation failure.
 */
struct xa_surface *
xa_surface_create(struct xa_tracker *xa,
                  int width,
                  int height,
                  int depth,
                  enum xa_surface_type stype,
                  enum xa_formats xa_format, unsigned int flags)
{
    struct pipe_resource *template;
    struct xa_surface *srf;
    struct xa_format_descriptor fdesc;

    /* Resolve the format: explicit, or best match for (stype, depth). */
    if (xa_format == xa_format_unknown)
        fdesc = xa_get_format_stype_depth(xa, stype, depth);
    else
        fdesc = xa_get_pipe_format(xa_format);

    if (fdesc.xa_format == xa_format_unknown)
        return NULL;

    srf = calloc(1, sizeof(*srf));
    if (!srf)
        return NULL;

    /* Fill in the resource template embedded in the surface. */
    template = &srf->template;
    template->format = fdesc.format;
    template->target = PIPE_TEXTURE_2D;
    template->width0 = width;
    template->height0 = height;
    template->depth0 = 1;
    template->array_size = 1;
    template->last_level = 0;
    template->bind = stype_bind[xa_format_type(fdesc.xa_format)];

    if (flags & XA_FLAG_SHARED)
        template->bind |= PIPE_BIND_SHARED;
    if (flags & XA_FLAG_RENDER_TARGET)
        template->bind |= PIPE_BIND_RENDER_TARGET;

    srf->tex = xa->screen->resource_create(xa->screen, template);
    if (!srf->tex)
        goto out_no_tex;

    srf->srf = NULL;            /* pipe_surface view, created lazily */
    srf->xa = xa;
    srf->flags = flags;
    srf->fdesc = fdesc;

    return srf;
 out_no_tex:
    free(srf);
    return NULL;
}
/*
 * Redefine an existing surface's size, format and flags, optionally
 * preserving its contents.
 *
 * If the new size, format and flags are compatible with the current
 * ones (see xa_flags_compat()), this is a no-op.  Otherwise a new
 * resource is created; with copy_contents set, the overlapping region
 * is blitted from the old resource (only legal for color, non-alpha
 * formats that the screen can render to).  On resource-creation failure
 * the surface is left unchanged and -XA_ERR_NORES is returned.
 * Returns XA_ERR_NONE on success, negative XA_ERR_* on failure.
 */
int
xa_surface_redefine(struct xa_surface *srf,
                    int width,
                    int height,
                    int depth,
                    enum xa_surface_type stype,
                    enum xa_formats xa_format,
                    unsigned int add_flags,
                    unsigned int remove_flags, int copy_contents)
{
    struct pipe_resource *template = &srf->template;
    struct pipe_resource *texture;
    struct pipe_box src_box;
    struct xa_tracker *xa = srf->xa;
    int save_width;
    int save_height;
    unsigned int new_flags = (srf->flags | add_flags) & ~(remove_flags);
    struct xa_format_descriptor fdesc;

    /* Resolve the requested format, as in xa_surface_create(). */
    if (xa_format == xa_format_unknown)
        fdesc = xa_get_format_stype_depth(xa, stype, depth);
    else
        fdesc = xa_get_pipe_format(xa_format);

    /* Nothing to do if the existing resource already matches. */
    if (width == template->width0 && height == template->height0 &&
        template->format == fdesc.format &&
        xa_flags_compat(srf->flags, new_flags))
        return XA_ERR_NONE;

    template->bind = stype_bind[xa_format_type(fdesc.xa_format)];

    if (new_flags & XA_FLAG_SHARED)
        template->bind |= PIPE_BIND_SHARED;
    if (new_flags & XA_FLAG_RENDER_TARGET)
        template->bind |= PIPE_BIND_RENDER_TARGET;

    if (copy_contents) {
        /* Content copies require a renderable color format that is not
         * alpha-only.
         */
        if (!xa_format_type_is_color(fdesc.xa_format) ||
            xa_format_type(fdesc.xa_format) == xa_type_a)
            return -XA_ERR_INVAL;

        if (!xa->screen->is_format_supported(xa->screen, fdesc.format,
                                             PIPE_TEXTURE_2D, 0,
                                             template->bind |
                                             PIPE_BIND_RENDER_TARGET))
            return -XA_ERR_INVAL;
    }

    /* Save the old size so we can roll back on failure. */
    save_width = template->width0;
    save_height = template->height0;
    template->width0 = width;
    template->height0 = height;

    texture = xa->screen->resource_create(xa->screen, template);
    if (!texture) {
        template->width0 = save_width;
        template->height0 = save_height;
        return -XA_ERR_NORES;
    }

    /* Drop any cached surface view of the old resource. */
    pipe_surface_reference(&srf->srf, NULL);

    if (copy_contents) {
        struct pipe_context *pipe = xa->default_ctx->pipe;

        /* Copy the region common to old and new size. */
        u_box_origin_2d(xa_min(save_width, template->width0),
                        xa_min(save_height, template->height0), &src_box);
        pipe->resource_copy_region(pipe, texture,
                                   0, 0, 0, 0, srf->tex, 0, &src_box);
        pipe->flush(pipe, &xa->default_ctx->last_fence);
    }

    /* Swap in the new resource; the reference calls release the old
     * texture and the local reference.
     */
    pipe_resource_reference(&srf->tex, texture);
    pipe_resource_reference(&texture, NULL);
    srf->fdesc = fdesc;
    srf->flags = new_flags;

    return XA_ERR_NONE;
}
/*
 * Destroy a surface: drop the cached surface view, unreference the
 * backing resource, and free the wrapper.
 */
void
xa_surface_destroy(struct xa_surface *srf)
{
    pipe_surface_reference(&srf->srf, NULL);
    pipe_resource_reference(&srf->tex, NULL);
    free(srf);
}
/*
 * Report the XA tracker version implemented by this library in
 * *major, *minor and *patch.
 */
void
xa_tracker_version(int *major, int *minor, int *patch)
{
    *major = XA_TRACKER_VERSION_MAJOR;
    *minor = XA_TRACKER_VERSION_MINOR;
    *patch = XA_TRACKER_VERSION_PATCH;
}
/*
 * Export the surface's backing resource as a shared (DRM flink-style)
 * handle, returning the handle and row stride in bytes through the out
 * parameters.  Returns XA_ERR_NONE on success, -XA_ERR_INVAL if the
 * screen cannot export the resource.
 */
int
xa_surface_handle(struct xa_surface *srf,
                  uint32_t * handle, unsigned int *stride)
{
    struct pipe_screen *screen = srf->xa->screen;
    struct winsys_handle whandle;

    memset(&whandle, 0, sizeof(whandle));
    whandle.type = DRM_API_HANDLE_TYPE_SHARED;

    if (!screen->resource_get_handle(screen, srf->tex, &whandle))
        return -XA_ERR_INVAL;

    *handle = whandle.handle;
    *stride = whandle.stride;

    return XA_ERR_NONE;
}

View File

@ -0,0 +1,174 @@
/**********************************************************
* Copyright 2009-2011 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* The format encoding idea is partially borrowed from libpixman, but it is not
* considered a "substantial part of the software", so the pixman copyright
* is left out for simplicity, and acknowledgment is instead given in this way.
*
*********************************************************
* Authors:
* Zack Rusin <zackr-at-vmware-dot-com>
* Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#ifndef _XA_TRACKER_H_
#define _XA_TRACKER_H_
#include <stdint.h>
/* Library version, reported by xa_tracker_version(). */
#define XA_TRACKER_VERSION_MAJOR 0
#define XA_TRACKER_VERSION_MINOR 1
#define XA_TRACKER_VERSION_PATCH 0

/* Surface creation flags. */
#define XA_FLAG_SHARED         (1 << 0)	/* exportable via xa_surface_handle() */
#define XA_FLAG_RENDER_TARGET  (1 << 1)	/* bindable as a render target */

/* Map access flags. */
#define XA_MAP_READ                    (1 << 0)
#define XA_MAP_WRITE                   (1 << 1)

/* Error codes; functions return the negated value on failure. */
#define XA_ERR_NONE            0	/* success */
#define XA_ERR_NORES           1	/* out of resources / memory */
#define XA_ERR_INVAL           2	/* invalid argument or operation */
#define XA_ERR_BUSY            3	/* resource busy */
/*
 * Coarse surface classification.  Ordering matters: all color types
 * precede xa_type_z (see xa_format_type_is_color()), and the values
 * index the preferred-format and bind-flag tables in xa_tracker.c.
 */
enum xa_surface_type {
    xa_type_other,              /* unclassified */
    xa_type_a,                  /* alpha-only */
    xa_type_argb,
    xa_type_abgr,
    xa_type_bgra,
    xa_type_z,                  /* depth-only */
    xa_type_zs,                 /* depth + stencil, depth in high bits */
    xa_type_sz,                 /* stencil + depth, stencil in high bits */
    xa_type_yuv_component       /* one plane of a YUV surface */
};
/*
 * Note that these formats should not be assumed to be binary compatible with
 * pixman formats, but with the below macros and a format type map,
 * conversion should be simple. Macros for now. We might replace with
 * inline functions.
 *
 * Encoding: bits 31..24 = bits per pixel, 23..16 = xa_surface_type,
 * then either four 4-bit ARGB channel widths, or two 8-bit component
 * widths for non-RGBA formats.
 */
#define xa_format(bpp,type,a,r,g,b) (((bpp) << 24) |		\
				     ((type) << 16) |		\
				     ((a) << 12) |		\
				     ((r) << 8) |		\
				     ((g) << 4) |		\
				     ((b)))

/*
 * Non-RGBA one- and two component formats.
 */
#define xa_format_c(bpp,type,c1,c2) (((bpp) << 24) |		\
				     ((type) << 16) |		\
				     ((c1) << 8) |		\
				     ((c2)))

/* Field extractors matching the encodings above. */
#define xa_format_bpp(f)	(((f) >> 24)      )
#define xa_format_type(f)	(((f) >> 16) & 0xff)
#define xa_format_a(f)		(((f) >> 12) & 0x0f)
#define xa_format_r(f)		(((f) >> 8) & 0x0f)
#define xa_format_g(f)		(((f) >> 4) & 0x0f)
#define xa_format_b(f)		(((f)     ) & 0x0f)
#define xa_format_rgb(f)	(((f)     ) & 0xfff)
#define xa_format_c1(f)		(((f) >> 8 ) & 0xff)
#define xa_format_c2(f)		(((f)      ) & 0xff)

/* Total significant bits: sum of channel widths for each encoding. */
#define xa_format_argb_depth(f) (xa_format_a(f) +	\
				 xa_format_r(f) +	\
				 xa_format_g(f) +	\
				 xa_format_b(f))

#define xa_format_c_depth(f) (xa_format_c1(f) +	\
			      xa_format_c2(f))
/*
 * Return nonzero iff the format's surface type is a color type.
 * Relies on all color types preceding xa_type_z in enum xa_surface_type.
 */
static inline int
xa_format_type_is_color(uint32_t xa_format)
{
    return xa_format_type(xa_format) < xa_type_z;
}
/*
 * Total number of significant bits in the format: the sum of the ARGB
 * channel widths for color formats, or of the two component widths for
 * depth/stencil and YUV formats.
 */
static inline unsigned int
xa_format_depth(uint32_t xa_format)
{
    if (xa_format_type_is_color(xa_format))
        return xa_format_argb_depth(xa_format);

    return xa_format_c_depth(xa_format);
}
/*
 * The XA format namespace, built from the encoding macros above.
 * xa_format_unknown (0) doubles as the "pick for me" request in
 * xa_surface_create() / xa_surface_redefine().
 */
enum xa_formats {
    xa_format_unknown = 0,
    xa_format_a8 = xa_format(8, xa_type_a, 8, 0, 0, 0),

    /* Color formats, named in pixman/Render style (high bits first). */
    xa_format_a8r8g8b8 = xa_format(32, xa_type_argb, 8, 8, 8, 8),
    xa_format_x8r8g8b8 = xa_format(32, xa_type_argb, 0, 8, 8, 8),
    xa_format_r5g6b5 = xa_format(16, xa_type_argb, 0, 5, 6, 5),
    xa_format_x1r5g5b5 = xa_format(16, xa_type_argb, 0, 5, 5, 5),

    /* Depth and depth/stencil formats. */
    xa_format_z16 = xa_format_c(16, xa_type_z, 16, 0),
    xa_format_z32 = xa_format_c(32, xa_type_z, 32, 0),
    xa_format_z24 = xa_format_c(32, xa_type_z, 24, 0),

    xa_format_x8z24 = xa_format_c(32, xa_type_sz, 24, 0),
    xa_format_s8z24 = xa_format_c(32, xa_type_sz, 24, 8),
    xa_format_z24x8 = xa_format_c(32, xa_type_zs, 24, 0),
    xa_format_z24s8 = xa_format_c(32, xa_type_zs, 24, 8),

    /* One 8-bit plane of a YUV surface. */
    xa_format_yuv8 = xa_format_c(8, xa_type_yuv_component, 8, 0)
};
/* Opaque handles; definitions live in the XA implementation. */
struct xa_tracker;
struct xa_surface;

/* Simple integer rectangle, (x1,y1) inclusive to (x2,y2) exclusive —
 * presumably matching Render box semantics; confirm against users.
 */
struct xa_box {
    uint16_t x1, y1, x2, y2;
};

/* Report the library version. */
extern void xa_tracker_version(int *major, int *minor, int *patch);

/* Create / destroy a tracker for a DRM device file descriptor. */
extern struct xa_tracker *xa_tracker_create(int drm_fd);

extern void xa_tracker_destroy(struct xa_tracker *xa);

/* Create a surface; pass xa_format_unknown as pform to let XA choose a
 * format from (stype, depth).  Returns NULL on failure.
 */
extern struct xa_surface *xa_surface_create(struct xa_tracker *xa,
					    int width,
					    int height,
					    int depth,
					    enum xa_surface_type stype,
					    enum xa_formats pform,
					    unsigned int flags);

/* Return the surface's format as an XA format code. */
enum xa_formats xa_surface_pict_format(const struct xa_surface *srf);

extern void xa_surface_destroy(struct xa_surface *srf);

/* Resize / reformat a surface in place; see xa_surface_redefine() in
 * xa_tracker.c for the full contract.  Returns XA_ERR_NONE or a
 * negative XA_ERR_* code.
 */
extern int xa_surface_redefine(struct xa_surface *srf,
			       int width,
			       int height,
			       int depth,
			       enum xa_surface_type stype,
			       enum xa_formats rgb_format,
			       unsigned int add_flags,
			       unsigned int remove_flags, int copy_contents);

/* Export the surface as a shared handle plus byte stride. */
extern int xa_surface_handle(struct xa_surface *srf,
			     uint32_t * handle, unsigned int *byte_stride);
#endif

View File

@ -0,0 +1,178 @@
/**********************************************************
* Copyright 2009-2011 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*********************************************************
* Authors:
* Zack Rusin <zackr-at-vmware-dot-com>
* Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include "xa_context.h"
#include "xa_priv.h"
#include "util/u_inlines.h"
#include "util/u_sampler.h"
#include "util/u_surface.h"
#include "cso_cache/cso_context.h"
static void
xa_yuv_bind_blend_state(struct xa_context *r)
{
struct pipe_blend_state blend;
memset(&blend, 0, sizeof(struct pipe_blend_state));
blend.rt[0].blend_enable = 0;
blend.rt[0].colormask = PIPE_MASK_RGBA;
/* porter&duff src */
blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
cso_set_blend(r->cso, &blend);
}
static void
xa_yuv_bind_shaders(struct xa_context *r)
{
unsigned vs_traits = 0, fs_traits = 0;
struct xa_shader shader;
vs_traits |= VS_YUV;
fs_traits |= FS_YUV;
shader = xa_shaders_get(r->shaders, vs_traits, fs_traits);
cso_set_vertex_shader_handle(r->cso, shader.vs);
cso_set_fragment_shader_handle(r->cso, shader.fs);
}
static void
xa_yuv_bind_samplers(struct xa_context *r, struct xa_surface *yuv[])
{
struct pipe_sampler_state *samplers[3];
struct pipe_sampler_state sampler;
struct pipe_sampler_view *views[3];
struct pipe_sampler_view view_templ;
unsigned int i;
memset(&sampler, 0, sizeof(struct pipe_sampler_state));
sampler.wrap_s = PIPE_TEX_WRAP_CLAMP;
sampler.wrap_t = PIPE_TEX_WRAP_CLAMP;
sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NEAREST;
sampler.normalized_coords = 1;
for (i = 0; i < 3; ++i) {
samplers[i] = &sampler;
if (!yuv[i]->view) {
u_sampler_view_default_template(&view_templ,
yuv[i]->tex, yuv[i]->tex->format);
yuv[i]->view = r->pipe->create_sampler_view(r->pipe,
yuv[i]->tex,
&view_templ);
}
views[i] = yuv[i]->view;
}
cso_set_samplers(r->cso, 3, (const struct pipe_sampler_state **)samplers);
cso_set_fragment_sampler_views(r->cso, 3, views);
}
/*
 * Upload the 12-float conversion matrix (presumably 3x4 yuv->rgb --
 * see the FS_YUV shader) as fragment shader constants.
 */
static void
xa_yuv_fs_constants(struct xa_context *r, const float conversion_matrix[])
{
    renderer_set_constants(r, PIPE_SHADER_FRAGMENT, conversion_matrix,
			   12 * sizeof(float));
}
/*
 * Release the sampler views cached on the three yuv plane surfaces.
 */
static void
xa_yuv_destroy_sampler_views(struct xa_surface *yuv[])
{
    unsigned int plane;

    for (plane = 0; plane < 3; ++plane)
	pipe_sampler_view_reference(&yuv[plane]->view, NULL);
}
extern int
xa_yuv_planar_blit(struct xa_context *r,
int src_x,
int src_y,
int src_w,
int src_h,
int dst_x,
int dst_y,
int dst_w,
int dst_h,
struct xa_box *box,
unsigned int num_boxes,
const float conversion_matrix[],
struct xa_surface *dst, struct xa_surface *yuv[])
{
float scale_x;
float scale_y;
struct pipe_surface srf_templ;
if (dst_w == 0 || dst_h == 0)
return XA_ERR_NONE;
memset(&srf_templ, 0, sizeof(srf_templ));
u_surface_default_template(&srf_templ, dst->tex, PIPE_BIND_RENDER_TARGET);
dst->srf = r->pipe->create_surface(r->pipe, dst->tex, &srf_templ);
if (!dst->srf)
return -XA_ERR_NORES;
renderer_bind_destination(r, dst->srf, dst->srf->width, dst->srf->height);
xa_yuv_bind_blend_state(r);
xa_yuv_bind_shaders(r);
xa_yuv_bind_samplers(r, yuv);
xa_yuv_fs_constants(r, conversion_matrix);
scale_x = (float)src_w / (float)dst_w;
scale_y = (float)src_h / (float)dst_h;
while (num_boxes--) {
int x = box->x1;
int y = box->y1;
int w = box->x2 - box->x1;
int h = box->y2 - box->y1;
renderer_draw_yuv(r,
(float)src_x + scale_x * (x - dst_x),
(float)src_y + scale_y * (y - dst_y),
scale_x * w, scale_y * h, x, y, w, h, yuv);
}
r->pipe->flush(r->pipe, &r->last_fence);
xa_yuv_destroy_sampler_views(yuv);
pipe_surface_reference(&dst->srf, NULL);
return XA_ERR_NONE;
}

View File

@ -0,0 +1,100 @@
# Path to the Mesa top-level directory and the configuration selected
# by the configure run.
TOP = ../../../..
include $(TOP)/configs/current

##### MACROS #####

# libxatracker version triple (major.minor.tiny), passed to mklib.
XA_MAJOR = 0
XA_MINOR = 1
XA_TINY = 0

XA_CFLAGS = -g -fPIC

XA_INCLUDES= -I$(TOP)/src/gallium/ \
	-I$(TOP)/src/gallium/auxiliary \
	-I$(TOP)/src/gallium/include \
	-I$(TOP)/src/gallium/winsys \
	-I$(TOP)/src/gallium/drivers

XA_LIB = xatracker
XA_LIB_NAME = lib$(XA_LIB).so
# Glob matching the versioned .so names that mklib produces.
XA_LIB_GLOB = lib$(XA_LIB)*.so*

# Everything linked into the shared library: the xa state-tracker
# object plus the svga winsys/driver and the debug wrapper drivers.
XA_LIB_DEPS = \
	$(TOP)/src/gallium/state_trackers/xa/libxatracker.o \
	$(TOP)/src/gallium/winsys/svga/drm/libsvgadrm.a \
	$(TOP)/src/gallium/drivers/svga/libsvga.a \
	$(TOP)/src/gallium/drivers/trace/libtrace.a \
	$(TOP)/src/gallium/drivers/rbug/librbug.a

# Deliberately cleared: this target contributes no common gallium
# sources of its own.
COMMON_GALLIUM_SOURCES=

SOURCES = vmw_target.c
OBJECTS = $(SOURCES:.c=.o)

# Extra link flags when the gallium auxiliaries were built against
# LLVM; otherwise only the C++ runtime is needed.
ifeq ($(MESA_LLVM),1)
LDFLAGS += $(LLVM_LDFLAGS)
GALLIUM_AUXILIARIES += $(LLVM_LIBS)
else
LDFLAGS += -lstdc++
endif
##### RULES #####

# Compile a target source file.  A GNU-make pattern rule instead of
# the legacy .c.o suffix rule, with an explicit -o so the object file
# is always created under the target's own name.
%.o: %.c
	$(CC) -c $(XA_CFLAGS) $(XA_INCLUDES) $< -o $@
##### TARGETS #####

# Default goal: build the shared library into the common gallium lib
# directory.
default: $(TOP)/$(LIB_DIR)/gallium/$(XA_LIB_NAME)

# Make the library.  mklib handles the soname versioning
# (-major/-minor/-patch), restricts the exported symbols to those
# listed in the state tracker's xa_symbols file and installs the
# result into the gallium lib dir.  `depend` is regenerated first.
# NOTE(review): there is no space before the line continuation after
# "xa_symbols"; the shell still separates the arguments via the
# leading tab of the next line -- confirm before reformatting.
$(TOP)/$(LIB_DIR)/gallium/$(XA_LIB_NAME): depend $(OBJECTS) $(XA_LIB_DEPS)
	$(MKLIB) -o $(XA_LIB) -linker $(CC) -ldflags '$(LDFLAGS)' \
		-major $(XA_MAJOR) -minor $(XA_MINOR) -patch $(XA_TINY) \
		$(MKLIB_OPTIONS) \
		-exports $(TOP)/src/gallium/state_trackers/xa/xa_symbols\
		-install $(TOP)/$(LIB_DIR)/gallium \
		$(OBJECTS) $(XA_LIB_DEPS) $(GALLIUM_AUXILIARIES)
# xa pkgconfig file

# sed program that fills in the @...@ placeholders of xatracker.pc.in
# with values from the build configuration.
pcedit = sed \
	-e 's,@INSTALL_DIR@,$(INSTALL_DIR),g' \
	-e 's,@INSTALL_LIB_DIR@,$(INSTALL_LIB_DIR),g' \
	-e 's,@INSTALL_INC_DIR@,$(INSTALL_INC_DIR),g' \
	-e 's,@VERSION@,$(XA_MAJOR).$(XA_MINOR).$(XA_TINY),g' \
	-e 's,@XA_PC_REQ_PRIV@,$(XA_PC_REQ_PRIV),g' \
	-e 's,@XA_PC_LIB_PRIV@,$(XA_PC_LIB_PRIV),g' \
	-e 's,@XA_PC_CFLAGS@,$(XA_PC_CFLAGS),g' \
	-e 's,@XA_LIB@,$(XA_LIB),g'

# Generate the pkg-config file from its template.
xatracker.pc: xatracker.pc.in
	$(pcedit) $< > $@
# Install headers, the versioned library and the pkg-config file.
# `install` is a command, not a file -- declare it phony so a stray
# file of that name cannot shadow it.
# NOTE(review): `install` does not depend on `default`, so the library
# must already have been built -- confirm this matches the other
# gallium target Makefiles before changing it.
.PHONY: install
install: xatracker.pc
	$(INSTALL) -d $(DESTDIR)$(INSTALL_INC_DIR)
	$(INSTALL) -d $(DESTDIR)$(INSTALL_LIB_DIR)
	$(INSTALL) -d $(DESTDIR)$(INSTALL_LIB_DIR)/pkgconfig
	$(INSTALL) -m 644 $(TOP)/src/gallium/state_trackers/xa/xa_tracker.h $(DESTDIR)$(INSTALL_INC_DIR)
	$(INSTALL) -m 644 $(TOP)/src/gallium/state_trackers/xa/xa_context.h $(DESTDIR)$(INSTALL_INC_DIR)
	$(MINSTALL) -m 755 $(TOP)/$(LIB_DIR)/gallium/$(XA_LIB_GLOB) $(DESTDIR)$(INSTALL_LIB_DIR)
	$(INSTALL) -m 644 xatracker.pc $(DESTDIR)$(INSTALL_LIB_DIR)/pkgconfig
# Remove all build products.  Phony for the same reason as `install`.
# The leading '-' keeps make going when the files do not exist.
.PHONY: clean
clean:
	-rm -f *.o *~
	-rm -f *.lo
	-rm -f *.la
	-rm -f *.pc
	-rm -rf .libs
	-rm -f depend depend.bak exptmp
# Regenerate header dependencies.  `depend` is truncated, then MKDEP
# runs over the sources (presumably MKDEP_OPTIONS directs its output
# into the depend file -- confirm against configs/current); chatter is
# discarded.
depend: $(SOURCES)
	@ echo "running $(MKDEP)"
	@ rm -f depend
	@ touch depend
	@ $(MKDEP) $(MKDEP_OPTIONS) -I$(TOP)/include $(XA_INCLUDES) $(SOURCES) \
		> /dev/null

# The leading '-' silences the missing-file warning on the first
# build, before `depend` has been generated.
-include depend

# Conventional always-out-of-date pseudo target.
FORCE:

View File

@ -0,0 +1,27 @@
#include "target-helpers/inline_debug_helper.h"
#include "state_tracker/drm_driver.h"
#include "svga/drm/svga_drm_public.h"
#include "svga/svga_public.h"
#include "xa_tracker.h"
static struct pipe_screen *
create_screen(int fd)
{
struct svga_winsys_screen *sws;
struct pipe_screen *screen;
sws = svga_drm_winsys_screen_create(fd);
if (!sws)
return NULL;
screen = svga_screen_create(sws);
if (!screen)
return NULL;
screen = debug_screen_wrap(screen);
return screen;
}
DRM_DRIVER_DESCRIPTOR("vmwgfx", "vmwgfx", create_screen)

View File

@ -0,0 +1,13 @@
# pkg-config template for libxatracker.  The @...@ placeholders are
# substituted by the `pcedit` sed script in the xa-vmwgfx target
# Makefile.
prefix=@INSTALL_DIR@
exec_prefix=${prefix}
libdir=@INSTALL_LIB_DIR@
includedir=@INSTALL_INC_DIR@

Name: xatracker
Description: Xorg Gallium3D acceleration library
Requires:
Requires.private: @XA_PC_REQ_PRIV@
Version: @VERSION@
Libs: -L${libdir} -l@XA_LIB@
Libs.private: @XA_PC_LIB_PRIV@
Cflags: -I${includedir} @XA_PC_CFLAGS@