i965: Import libdrm_intel.

This imports commit 19c4cfc54918d361f2535aec16650e9f0be667cd of
libdrm/intel/*.[ch], minus a few files that we're never going to use
(and would immediately delete), plus a few necessary dependencies.

We rename intel_bufmgr.h to brw_bufmgr.h to avoid #include conflicts.
We also fix UTF-8 encoding problems in intel_bufmgr_gem.c comments,
since vim keeps trying to fix them every time I edit the file, and we
may as well fix them right away.

Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jason Ekstrand <jason@jlekstrand.net>
Kenneth Graunke 2017-03-20 16:40:01 -07:00
parent 915820cc59
commit 514db96c11
9 changed files with 6724 additions and 0 deletions


@@ -0,0 +1,341 @@
/*
* Copyright © 2008-2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
/**
* @file intel_bufmgr.h
*
* Public definitions of Intel-specific bufmgr functions.
*/
#ifndef INTEL_BUFMGR_H
#define INTEL_BUFMGR_H
#include <stdio.h>
#include <stdint.h>
#if defined(__cplusplus)
extern "C" {
#endif
struct drm_clip_rect;
typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
typedef struct _drm_intel_context drm_intel_context;
typedef struct _drm_intel_bo drm_intel_bo;
struct _drm_intel_bo {
/**
* Size in bytes of the buffer object.
*
* The size may be larger than the size originally requested for the
* allocation, such as being aligned to page size.
*/
unsigned long size;
/**
* Alignment requirement for object
*
* Used for GTT mapping & pinning the object.
*/
unsigned long align;
/**
* Deprecated field containing (possibly the low 32-bits of) the last
* seen virtual card address. Use offset64 instead.
*/
unsigned long offset;
/**
* Virtual address for accessing the buffer data. Only valid while
* mapped.
*/
#ifdef __cplusplus
void *virt;
#else
void *virtual;
#endif
/** Buffer manager context associated with this buffer object */
drm_intel_bufmgr *bufmgr;
/**
* MM-specific handle for accessing object
*/
int handle;
/**
* Last seen card virtual address (offset from the beginning of the
* aperture) for the object. This should be used to fill relocation
* entries when calling drm_intel_bo_emit_reloc()
*/
uint64_t offset64;
};
enum aub_dump_bmp_format {
AUB_DUMP_BMP_FORMAT_8BIT = 1,
AUB_DUMP_BMP_FORMAT_ARGB_4444 = 4,
AUB_DUMP_BMP_FORMAT_ARGB_0888 = 6,
AUB_DUMP_BMP_FORMAT_ARGB_8888 = 7,
};
typedef struct _drm_intel_aub_annotation {
uint32_t type;
uint32_t subtype;
uint32_t ending_offset;
} drm_intel_aub_annotation;
#define BO_ALLOC_FOR_RENDER (1<<0)
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
drm_intel_bo *drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
const char *name,
void *addr, uint32_t tiling_mode,
uint32_t stride, unsigned long size,
unsigned long flags);
drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
const char *name,
int x, int y, int cpp,
uint32_t *tiling_mode,
unsigned long *pitch,
unsigned long flags);
void drm_intel_bo_reference(drm_intel_bo *bo);
void drm_intel_bo_unreference(drm_intel_bo *bo);
int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
int drm_intel_bo_unmap(drm_intel_bo *bo);
int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data);
int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data);
void drm_intel_bo_wait_rendering(drm_intel_bo *bo);
void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug);
void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
int drm_intel_bo_exec(drm_intel_bo *bo, int used,
struct drm_clip_rect *cliprects, int num_cliprects, int DR4);
int drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
struct drm_clip_rect *cliprects, int num_cliprects, int DR4,
unsigned int flags);
int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count);
int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
int drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo,
uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment);
int drm_intel_bo_unpin(drm_intel_bo *bo);
int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name);
int drm_intel_bo_busy(drm_intel_bo *bo);
int drm_intel_bo_madvise(drm_intel_bo *bo, int madv);
int drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable);
int drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset);
int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
int drm_intel_bo_is_reusable(drm_intel_bo *bo);
int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo);
/* drm_intel_bufmgr_gem.c */
drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size);
drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned int handle);
void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
void drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr);
void drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr,
int limit);
int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo);
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
#define HAVE_DRM_INTEL_GEM_BO_DISABLE_IMPLICIT_SYNC 1
int drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr);
void drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo);
void drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo);
void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo);
void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo);
void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo);
int drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo);
void drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start);
void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
const char *filename);
void drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable);
void drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
int x1, int y1, int width, int height,
enum aub_dump_bmp_format format,
int pitch, int offset);
void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
drm_intel_aub_annotation *annotations,
unsigned count);
int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id);
int drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total);
int drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr);
int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns);
drm_intel_context *drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr);
int drm_intel_gem_context_get_id(drm_intel_context *ctx,
uint32_t *ctx_id);
void drm_intel_gem_context_destroy(drm_intel_context *ctx);
int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
int used, unsigned int flags);
int drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
drm_intel_context *ctx,
int used,
int in_fence,
int *out_fence,
unsigned int flags);
int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd);
drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr,
int prime_fd, int size);
/* drm_intel_bufmgr_fake.c */
drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
unsigned long low_offset,
void *low_virtual,
unsigned long size,
volatile unsigned int
*last_dispatch);
void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
volatile unsigned int
*last_dispatch);
void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
int (*exec) (drm_intel_bo *bo,
unsigned int used,
void *priv),
void *priv);
void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
unsigned int (*emit) (void *priv),
void (*wait) (unsigned int fence,
void *priv),
void *priv);
drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long offset,
unsigned long size, void *virt);
void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
void (*invalidate_cb) (drm_intel_bo
* bo,
void *ptr),
void *ptr);
void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr);
void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
struct drm_intel_decode *drm_intel_decode_context_alloc(uint32_t devid);
void drm_intel_decode_context_free(struct drm_intel_decode *ctx);
void drm_intel_decode_set_batch_pointer(struct drm_intel_decode *ctx,
void *data, uint32_t hw_offset,
int count);
void drm_intel_decode_set_dump_past_end(struct drm_intel_decode *ctx,
int dump_past_end);
void drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
uint32_t head, uint32_t tail);
void drm_intel_decode_set_output_file(struct drm_intel_decode *ctx, FILE *out);
void drm_intel_decode(struct drm_intel_decode *ctx);
int drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result);
int drm_intel_get_reset_stats(drm_intel_context *ctx,
uint32_t *reset_count,
uint32_t *active,
uint32_t *pending);
int drm_intel_get_subslice_total(int fd, unsigned int *subslice_total);
int drm_intel_get_eu_total(int fd, unsigned int *eu_total);
int drm_intel_get_pooled_eu(int fd);
int drm_intel_get_min_eu_in_pool(int fd);
/** @{ Compatibility defines to keep old code building despite the symbol rename
* from dri_* to drm_intel_*
*/
#define dri_bo drm_intel_bo
#define dri_bufmgr drm_intel_bufmgr
#define dri_bo_alloc drm_intel_bo_alloc
#define dri_bo_reference drm_intel_bo_reference
#define dri_bo_unreference drm_intel_bo_unreference
#define dri_bo_map drm_intel_bo_map
#define dri_bo_unmap drm_intel_bo_unmap
#define dri_bo_subdata drm_intel_bo_subdata
#define dri_bo_get_subdata drm_intel_bo_get_subdata
#define dri_bo_wait_rendering drm_intel_bo_wait_rendering
#define dri_bufmgr_set_debug drm_intel_bufmgr_set_debug
#define dri_bufmgr_destroy drm_intel_bufmgr_destroy
#define dri_bo_exec drm_intel_bo_exec
#define dri_bufmgr_check_aperture_space drm_intel_bufmgr_check_aperture_space
#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset, \
reloc_offset, target_bo) \
drm_intel_bo_emit_reloc(reloc_bo, reloc_offset, \
target_bo, target_offset, \
read, write);
#define dri_bo_pin drm_intel_bo_pin
#define dri_bo_unpin drm_intel_bo_unpin
#define dri_bo_get_tiling drm_intel_bo_get_tiling
#define dri_bo_set_tiling(bo, mode) drm_intel_bo_set_tiling(bo, mode, 0)
#define dri_bo_flink drm_intel_bo_flink
#define intel_bufmgr_gem_init drm_intel_bufmgr_gem_init
#define intel_bo_gem_create_from_name drm_intel_bo_gem_create_from_name
#define intel_bufmgr_gem_enable_reuse drm_intel_bufmgr_gem_enable_reuse
#define intel_bufmgr_fake_init drm_intel_bufmgr_fake_init
#define intel_bufmgr_fake_set_last_dispatch drm_intel_bufmgr_fake_set_last_dispatch
#define intel_bufmgr_fake_set_exec_callback drm_intel_bufmgr_fake_set_exec_callback
#define intel_bufmgr_fake_set_fence_callback drm_intel_bufmgr_fake_set_fence_callback
#define intel_bo_fake_alloc_static drm_intel_bo_fake_alloc_static
#define intel_bo_fake_disable_backing_store drm_intel_bo_fake_disable_backing_store
#define intel_bufmgr_fake_contended_lock_take drm_intel_bufmgr_fake_contended_lock_take
#define intel_bufmgr_fake_evict_all drm_intel_bufmgr_fake_evict_all
/** @} */
#if defined(__cplusplus)
}
#endif
#endif /* INTEL_BUFMGR_H */
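
For orientation, here is a minimal usage sketch of the public API above — not part of the import. It assumes an already-open DRM fd, uses the CPU mapping exposed through bo->virtual, and elides all error handling and the batch-submission path.

#include <string.h>

/* Hypothetical sketch: upload `len` bytes into a freshly allocated bo. */
static void
example_upload(int fd, const void *pixels, unsigned long len)
{
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "example", len, 4096);

	/* Map for write, copy data in through bo->virtual, then unmap. */
	drm_intel_bo_map(bo, 1 /* write_enable */);
	memcpy(bo->virtual, pixels, len);
	drm_intel_bo_unmap(bo);

	drm_intel_bo_unreference(bo);
	drm_intel_bufmgr_destroy(bufmgr);
}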


@@ -0,0 +1,374 @@
/*
* Copyright © 2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include <drm.h>
#include <i915_drm.h>
#include <pciaccess.h>
#include "libdrm_macros.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "xf86drm.h"
/** @file intel_bufmgr.c
*
* Convenience functions for buffer management methods.
*/
drm_intel_bo *
drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
drm_intel_bo *
drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}
drm_intel_bo *
drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
const char *name, void *addr,
uint32_t tiling_mode,
uint32_t stride,
unsigned long size,
unsigned long flags)
{
if (bufmgr->bo_alloc_userptr)
return bufmgr->bo_alloc_userptr(bufmgr, name, addr, tiling_mode,
stride, size, flags);
return NULL;
}
drm_intel_bo *
drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
{
return bufmgr->bo_alloc_tiled(bufmgr, name, x, y, cpp,
tiling_mode, pitch, flags);
}
void
drm_intel_bo_reference(drm_intel_bo *bo)
{
bo->bufmgr->bo_reference(bo);
}
void
drm_intel_bo_unreference(drm_intel_bo *bo)
{
if (bo == NULL)
return;
bo->bufmgr->bo_unreference(bo);
}
int
drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
{
return buf->bufmgr->bo_map(buf, write_enable);
}
int
drm_intel_bo_unmap(drm_intel_bo *buf)
{
return buf->bufmgr->bo_unmap(buf);
}
int
drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
return bo->bufmgr->bo_subdata(bo, offset, size, data);
}
int
drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
int ret;
if (bo->bufmgr->bo_get_subdata)
return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
if (size == 0 || data == NULL)
return 0;
ret = drm_intel_bo_map(bo, 0);
if (ret)
return ret;
memcpy(data, (unsigned char *)bo->virtual + offset, size);
drm_intel_bo_unmap(bo);
return 0;
}
void
drm_intel_bo_wait_rendering(drm_intel_bo *bo)
{
bo->bufmgr->bo_wait_rendering(bo);
}
void
drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
{
bufmgr->destroy(bufmgr);
}
int
drm_intel_bo_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
}
int
drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
unsigned int rings)
{
if (bo->bufmgr->bo_mrb_exec)
return bo->bufmgr->bo_mrb_exec(bo, used,
cliprects, num_cliprects, DR4,
rings);
switch (rings) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
return bo->bufmgr->bo_exec(bo, used,
cliprects, num_cliprects, DR4);
default:
return -ENODEV;
}
}
void
drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
{
bufmgr->debug = enable_debug;
}
int
drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
{
return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}
int
drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
if (bo->bufmgr->bo_flink)
return bo->bufmgr->bo_flink(bo, name);
return -ENODEV;
}
int
drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
return bo->bufmgr->bo_emit_reloc(bo, offset,
target_bo, target_offset,
read_domains, write_domain);
}
/* For fence registers, not GL fences */
int
drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
return bo->bufmgr->bo_emit_reloc_fence(bo, offset,
target_bo, target_offset,
read_domains, write_domain);
}
int
drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
if (bo->bufmgr->bo_pin)
return bo->bufmgr->bo_pin(bo, alignment);
return -ENODEV;
}
int
drm_intel_bo_unpin(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_unpin)
return bo->bufmgr->bo_unpin(bo);
return -ENODEV;
}
int
drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride)
{
if (bo->bufmgr->bo_set_tiling)
return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
*tiling_mode = I915_TILING_NONE;
return 0;
}
int
drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode)
{
if (bo->bufmgr->bo_get_tiling)
return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
*tiling_mode = I915_TILING_NONE;
*swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
return 0;
}
int
drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
{
if (bo->bufmgr->bo_set_softpin_offset)
return bo->bufmgr->bo_set_softpin_offset(bo, offset);
return -ENODEV;
}
int
drm_intel_bo_disable_reuse(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_disable_reuse)
return bo->bufmgr->bo_disable_reuse(bo);
return 0;
}
int
drm_intel_bo_is_reusable(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_is_reusable)
return bo->bufmgr->bo_is_reusable(bo);
return 0;
}
int
drm_intel_bo_busy(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_busy)
return bo->bufmgr->bo_busy(bo);
return 0;
}
int
drm_intel_bo_madvise(drm_intel_bo *bo, int madv)
{
if (bo->bufmgr->bo_madvise)
return bo->bufmgr->bo_madvise(bo, madv);
return -1;
}
int
drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
{
if (bo->bufmgr->bo_use_48b_address_range) {
bo->bufmgr->bo_use_48b_address_range(bo, enable);
return 0;
}
return -ENODEV;
}
int
drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
return bo->bufmgr->bo_references(bo, target_bo);
}
int
drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
if (bufmgr->get_pipe_from_crtc_id)
return bufmgr->get_pipe_from_crtc_id(bufmgr, crtc_id);
return -1;
}
static size_t
drm_intel_probe_agp_aperture_size(int fd)
{
struct pci_device *pci_dev;
size_t size = 0;
int ret;
ret = pci_system_init();
if (ret)
goto err;
/* XXX handle multiple adaptors? */
pci_dev = pci_device_find_by_slot(0, 0, 2, 0);
if (pci_dev == NULL)
goto err;
ret = pci_device_probe(pci_dev);
if (ret)
goto err;
size = pci_dev->regions[2].size;
err:
pci_system_cleanup();
return size;
}
int
drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total)
{
struct drm_i915_gem_get_aperture aperture;
int ret;
ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
if (ret)
return ret;
*mappable = 0;
/* XXX add a query for the kernel value? */
if (*mappable == 0)
*mappable = drm_intel_probe_agp_aperture_size(fd);
if (*mappable == 0)
*mappable = 64 * 1024 * 1024; /* minimum possible value */
*total = aperture.aper_size;
return 0;
}
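
The wrappers in this file are deliberately thin: each public entry point dispatches through the function-pointer table in struct _drm_intel_bufmgr (defined in intel_bufmgr_priv.h below), checking optional hooks for NULL and falling back or returning -ENODEV otherwise. A hypothetical sketch of how a backend fills in that table — the example_ names are illustrative, not part of the import, and assume intel_bufmgr_priv.h is included:

#include <stdlib.h>

/* Stub allocator for illustration only. */
static drm_intel_bo *
example_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
		 unsigned long size, unsigned int alignment)
{
	/* ... allocate backend storage and fill in a drm_intel_bo ... */
	return NULL;
}

static drm_intel_bufmgr *
example_bufmgr_create(void)
{
	drm_intel_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));

	bufmgr->bo_alloc = example_bo_alloc;
	/* Optional hooks (bo_pin, bo_flink, bo_madvise, ...) may stay
	 * NULL; the public wrappers above then fall back or return
	 * -ENODEV. */
	return bufmgr;
}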

File diff suppressed because it is too large.


@@ -0,0 +1,325 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
/**
* @file intel_bufmgr_priv.h
*
* Private definitions of Intel-specific bufmgr functions and structures.
*/
#ifndef INTEL_BUFMGR_PRIV_H
#define INTEL_BUFMGR_PRIV_H
/**
* Context for a buffer manager instance.
*
* Contains public methods followed by private storage for the buffer manager.
*/
struct _drm_intel_bufmgr {
/**
* Allocate a buffer object.
*
* Buffer objects are not necessarily initially mapped into CPU virtual
* address space or graphics device aperture. They must be mapped
* using bo_map() or drm_intel_gem_bo_map_gtt() to be used by the CPU.
*/
drm_intel_bo *(*bo_alloc) (drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
/**
* Allocate a buffer object, hinting that it will be used as a
* render target.
*
* This is otherwise the same as bo_alloc.
*/
drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
/**
* Allocate a buffer object from an existing user accessible
* address malloc'd with the provided size.
* Alignment is used when mapping to the gtt.
* Flags may be I915_VMAP_READ_ONLY or I915_USERPTR_UNSYNCHRONIZED
*/
drm_intel_bo *(*bo_alloc_userptr)(drm_intel_bufmgr *bufmgr,
const char *name, void *addr,
uint32_t tiling_mode, uint32_t stride,
unsigned long size,
unsigned long flags);
/**
* Allocate a tiled buffer object.
*
* Alignment for tiled objects is set automatically; the 'flags'
* argument provides a hint about how the object will be used initially.
*
* Valid tiling formats are:
* I915_TILING_NONE
* I915_TILING_X
* I915_TILING_Y
*
* Note the tiling format may be rejected; callers should check the
* 'tiling_mode' field on return, as well as the pitch value, which
* may have been rounded up to accommodate tiling restrictions.
*/
drm_intel_bo *(*bo_alloc_tiled) (drm_intel_bufmgr *bufmgr,
const char *name,
int x, int y, int cpp,
uint32_t *tiling_mode,
unsigned long *pitch,
unsigned long flags);
/** Takes a reference on a buffer object */
void (*bo_reference) (drm_intel_bo *bo);
/**
* Releases a reference on a buffer object, freeing the data if
* no references remain.
*/
void (*bo_unreference) (drm_intel_bo *bo);
/**
* Maps the buffer into userspace.
*
* This function will block waiting for any existing execution on the
* buffer to complete, first. The resulting mapping is available at
* buf->virtual.
*/
int (*bo_map) (drm_intel_bo *bo, int write_enable);
/**
* Reduces the refcount on the userspace mapping of the buffer
* object.
*/
int (*bo_unmap) (drm_intel_bo *bo);
/**
* Write data into an object.
*
* This is an optional function, if missing,
* drm_intel_bo will map/memcpy/unmap.
*/
int (*bo_subdata) (drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data);
/**
* Read data from an object
*
* This is an optional function, if missing,
* drm_intel_bo will map/memcpy/unmap.
*/
int (*bo_get_subdata) (drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data);
/**
* Waits for rendering to an object by the GPU to have completed.
*
* This is not required for any access to the BO by bo_map,
* bo_subdata, etc. It is merely a way for the driver to implement
* glFinish.
*/
void (*bo_wait_rendering) (drm_intel_bo *bo);
/**
* Tears down the buffer manager instance.
*/
void (*destroy) (drm_intel_bufmgr *bufmgr);
/**
* Indicate if the buffer can be placed anywhere in the full ppgtt
* address range (2^48).
*
* Any resource used with flat/heapless (0x00000000-0xfffff000)
* General State Heap (GSH) or Instruction State Heap (ISH) must
* be in a 32-bit range. 48-bit range will only be used when explicitly
* requested.
*
* \param bo Buffer to set the use_48b_address_range flag.
* \param enable The flag value.
*/
void (*bo_use_48b_address_range) (drm_intel_bo *bo, uint32_t enable);
/**
* Add relocation entry in reloc_buf, which will be updated with the
* target buffer's real offset on command submission.
*
* Relocations remain in place for the lifetime of the buffer object.
*
* \param bo Buffer to write the relocation into.
* \param offset Byte offset within reloc_bo of the pointer to
* target_bo.
* \param target_bo Buffer whose offset should be written into the
* relocation entry.
* \param target_offset Constant value to be added to target_bo's
* offset in relocation entry.
* \param read_domains GEM read domains which the buffer will be
* read into by the command that this relocation
* is part of.
* \param write_domain GEM write domain which the buffer will be
* dirtied in by the command that this
* relocation is part of.
*/
int (*bo_emit_reloc) (drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
int (*bo_emit_reloc_fence)(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo,
uint32_t target_offset,
uint32_t read_domains,
uint32_t write_domain);
/** Executes the command buffer pointed to by bo. */
int (*bo_exec) (drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4);
/** Executes the command buffer pointed to by bo on the selected
* ring buffer
*/
int (*bo_mrb_exec) (drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4, unsigned flags);
/**
* Pin a buffer to the aperture and fix the offset until unpinned
*
* \param buf Buffer to pin
* \param alignment Required alignment for aperture, in bytes
*/
int (*bo_pin) (drm_intel_bo *bo, uint32_t alignment);
/**
* Unpin a buffer from the aperture, allowing it to be removed
*
* \param buf Buffer to unpin
*/
int (*bo_unpin) (drm_intel_bo *bo);
/**
* Ask that the buffer be placed in tiling mode
*
* \param buf Buffer to set tiling mode for
* \param tiling_mode desired and returned tiling mode
*/
int (*bo_set_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
/**
* Get the current tiling (and resulting swizzling) mode for the bo.
*
* \param buf Buffer to get tiling mode for
* \param tiling_mode returned tiling mode
* \param swizzle_mode returned swizzling mode
*/
int (*bo_get_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
/**
* Set the offset at which this buffer will be softpinned
* \param bo Buffer to set the softpin offset for
* \param offset Softpin offset
*/
int (*bo_set_softpin_offset) (drm_intel_bo *bo, uint64_t offset);
/**
* Create a visible name for a buffer which can be used by other apps
*
* \param buf Buffer to create a name for
* \param name Returned name
*/
int (*bo_flink) (drm_intel_bo *bo, uint32_t * name);
/**
* Returns 1 if mapping the buffer for write could cause the process
* to block, due to the object being active in the GPU.
*/
int (*bo_busy) (drm_intel_bo *bo);
/**
* Specify the volatility of the buffer.
* \param bo Buffer whose purgeable status to set
* \param madv The purgeable status
*
* Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
* reclaimed under memory pressure. If you subsequently require the buffer,
* then you must pass I915_MADV_WILLNEED to mark the buffer as required.
*
* Returns 1 if the buffer was retained, or 0 if it was discarded whilst
* marked as I915_MADV_DONTNEED.
*/
int (*bo_madvise) (drm_intel_bo *bo, int madv);
int (*check_aperture_space) (drm_intel_bo ** bo_array, int count);
/**
* Disable buffer reuse for buffers which will be shared in some way,
* as with scanout buffers. When the buffer reference count goes to
* zero, it will be freed and not placed in the reuse list.
*
* \param bo Buffer to disable reuse for
*/
int (*bo_disable_reuse) (drm_intel_bo *bo);
/**
* Query whether a buffer is reusable.
*
* \param bo Buffer to query
*/
int (*bo_is_reusable) (drm_intel_bo *bo);
/**
*
* Return the pipe associated with a crtc_id so that vblank
* synchronization can use the correct data in the request.
* This is only supported for KMS and GEM at this point; when
* unsupported, this function returns -1 and leaves the decision
* of what to do in that case to the caller.
*
* \param bufmgr the associated buffer manager
* \param crtc_id the crtc identifier
*/
int (*get_pipe_from_crtc_id) (drm_intel_bufmgr *bufmgr, int crtc_id);
/** Returns true if target_bo is in the relocation tree rooted at bo. */
int (*bo_references) (drm_intel_bo *bo, drm_intel_bo *target_bo);
/** Enables verbose debugging printouts */
int debug;
};
struct _drm_intel_context {
unsigned int ctx_id;
struct _drm_intel_bufmgr *bufmgr;
};
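/* ALIGN assumes a power-of-two alignment (e.g. ALIGN(13, 8) == 16);
* ROUND_UP_TO works for any multiple (e.g. ROUND_UP_TO(13, 10) == 20). */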
#define ALIGN(value, alignment) (((value) + (alignment) - 1) & ~((alignment) - 1))
#define ROUND_UP_TO(x, y) (((x) + (y) - 1) / (y) * (y))
#define ROUND_UP_TO_MB(x) ROUND_UP_TO((x), 1024*1024)
#endif /* INTEL_BUFMGR_PRIV_H */


@@ -0,0 +1,469 @@
/*
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _INTEL_CHIPSET_H
#define _INTEL_CHIPSET_H
#define PCI_CHIP_I810 0x7121
#define PCI_CHIP_I810_DC100 0x7123
#define PCI_CHIP_I810_E 0x7125
#define PCI_CHIP_I815 0x1132
#define PCI_CHIP_I830_M 0x3577
#define PCI_CHIP_845_G 0x2562
#define PCI_CHIP_I855_GM 0x3582
#define PCI_CHIP_I865_G 0x2572
#define PCI_CHIP_I915_G 0x2582
#define PCI_CHIP_E7221_G 0x258A
#define PCI_CHIP_I915_GM 0x2592
#define PCI_CHIP_I945_G 0x2772
#define PCI_CHIP_I945_GM 0x27A2
#define PCI_CHIP_I945_GME 0x27AE
#define PCI_CHIP_Q35_G 0x29B2
#define PCI_CHIP_G33_G 0x29C2
#define PCI_CHIP_Q33_G 0x29D2
#define PCI_CHIP_IGD_GM 0xA011
#define PCI_CHIP_IGD_G 0xA001
#define IS_IGDGM(devid) ((devid) == PCI_CHIP_IGD_GM)
#define IS_IGDG(devid) ((devid) == PCI_CHIP_IGD_G)
#define IS_IGD(devid) (IS_IGDG(devid) || IS_IGDGM(devid))
#define PCI_CHIP_I965_G 0x29A2
#define PCI_CHIP_I965_Q 0x2992
#define PCI_CHIP_I965_G_1 0x2982
#define PCI_CHIP_I946_GZ 0x2972
#define PCI_CHIP_I965_GM 0x2A02
#define PCI_CHIP_I965_GME 0x2A12
#define PCI_CHIP_GM45_GM 0x2A42
#define PCI_CHIP_IGD_E_G 0x2E02
#define PCI_CHIP_Q45_G 0x2E12
#define PCI_CHIP_G45_G 0x2E22
#define PCI_CHIP_G41_G 0x2E32
#define PCI_CHIP_ILD_G 0x0042
#define PCI_CHIP_ILM_G 0x0046
#define PCI_CHIP_SANDYBRIDGE_GT1 0x0102 /* desktop */
#define PCI_CHIP_SANDYBRIDGE_GT2 0x0112
#define PCI_CHIP_SANDYBRIDGE_GT2_PLUS 0x0122
#define PCI_CHIP_SANDYBRIDGE_M_GT1 0x0106 /* mobile */
#define PCI_CHIP_SANDYBRIDGE_M_GT2 0x0116
#define PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS 0x0126
#define PCI_CHIP_SANDYBRIDGE_S 0x010A /* server */
#define PCI_CHIP_IVYBRIDGE_GT1 0x0152 /* desktop */
#define PCI_CHIP_IVYBRIDGE_GT2 0x0162
#define PCI_CHIP_IVYBRIDGE_M_GT1 0x0156 /* mobile */
#define PCI_CHIP_IVYBRIDGE_M_GT2 0x0166
#define PCI_CHIP_IVYBRIDGE_S 0x015a /* server */
#define PCI_CHIP_IVYBRIDGE_S_GT2 0x016a /* server */
#define PCI_CHIP_HASWELL_GT1 0x0402 /* Desktop */
#define PCI_CHIP_HASWELL_GT2 0x0412
#define PCI_CHIP_HASWELL_GT3 0x0422
#define PCI_CHIP_HASWELL_M_GT1 0x0406 /* Mobile */
#define PCI_CHIP_HASWELL_M_GT2 0x0416
#define PCI_CHIP_HASWELL_M_GT3 0x0426
#define PCI_CHIP_HASWELL_S_GT1 0x040A /* Server */
#define PCI_CHIP_HASWELL_S_GT2 0x041A
#define PCI_CHIP_HASWELL_S_GT3 0x042A
#define PCI_CHIP_HASWELL_B_GT1 0x040B /* Reserved */
#define PCI_CHIP_HASWELL_B_GT2 0x041B
#define PCI_CHIP_HASWELL_B_GT3 0x042B
#define PCI_CHIP_HASWELL_E_GT1 0x040E /* Reserved */
#define PCI_CHIP_HASWELL_E_GT2 0x041E
#define PCI_CHIP_HASWELL_E_GT3 0x042E
#define PCI_CHIP_HASWELL_SDV_GT1 0x0C02 /* Desktop */
#define PCI_CHIP_HASWELL_SDV_GT2 0x0C12
#define PCI_CHIP_HASWELL_SDV_GT3 0x0C22
#define PCI_CHIP_HASWELL_SDV_M_GT1 0x0C06 /* Mobile */
#define PCI_CHIP_HASWELL_SDV_M_GT2 0x0C16
#define PCI_CHIP_HASWELL_SDV_M_GT3 0x0C26
#define PCI_CHIP_HASWELL_SDV_S_GT1 0x0C0A /* Server */
#define PCI_CHIP_HASWELL_SDV_S_GT2 0x0C1A
#define PCI_CHIP_HASWELL_SDV_S_GT3 0x0C2A
#define PCI_CHIP_HASWELL_SDV_B_GT1 0x0C0B /* Reserved */
#define PCI_CHIP_HASWELL_SDV_B_GT2 0x0C1B
#define PCI_CHIP_HASWELL_SDV_B_GT3 0x0C2B
#define PCI_CHIP_HASWELL_SDV_E_GT1 0x0C0E /* Reserved */
#define PCI_CHIP_HASWELL_SDV_E_GT2 0x0C1E
#define PCI_CHIP_HASWELL_SDV_E_GT3 0x0C2E
#define PCI_CHIP_HASWELL_ULT_GT1 0x0A02 /* Desktop */
#define PCI_CHIP_HASWELL_ULT_GT2 0x0A12
#define PCI_CHIP_HASWELL_ULT_GT3 0x0A22
#define PCI_CHIP_HASWELL_ULT_M_GT1 0x0A06 /* Mobile */
#define PCI_CHIP_HASWELL_ULT_M_GT2 0x0A16
#define PCI_CHIP_HASWELL_ULT_M_GT3 0x0A26
#define PCI_CHIP_HASWELL_ULT_S_GT1 0x0A0A /* Server */
#define PCI_CHIP_HASWELL_ULT_S_GT2 0x0A1A
#define PCI_CHIP_HASWELL_ULT_S_GT3 0x0A2A
#define PCI_CHIP_HASWELL_ULT_B_GT1 0x0A0B /* Reserved */
#define PCI_CHIP_HASWELL_ULT_B_GT2 0x0A1B
#define PCI_CHIP_HASWELL_ULT_B_GT3 0x0A2B
#define PCI_CHIP_HASWELL_ULT_E_GT1 0x0A0E /* Reserved */
#define PCI_CHIP_HASWELL_ULT_E_GT2 0x0A1E
#define PCI_CHIP_HASWELL_ULT_E_GT3 0x0A2E
#define PCI_CHIP_HASWELL_CRW_GT1 0x0D02 /* Desktop */
#define PCI_CHIP_HASWELL_CRW_GT2 0x0D12
#define PCI_CHIP_HASWELL_CRW_GT3 0x0D22
#define PCI_CHIP_HASWELL_CRW_M_GT1 0x0D06 /* Mobile */
#define PCI_CHIP_HASWELL_CRW_M_GT2 0x0D16
#define PCI_CHIP_HASWELL_CRW_M_GT3 0x0D26
#define PCI_CHIP_HASWELL_CRW_S_GT1 0x0D0A /* Server */
#define PCI_CHIP_HASWELL_CRW_S_GT2 0x0D1A
#define PCI_CHIP_HASWELL_CRW_S_GT3 0x0D2A
#define PCI_CHIP_HASWELL_CRW_B_GT1 0x0D0B /* Reserved */
#define PCI_CHIP_HASWELL_CRW_B_GT2 0x0D1B
#define PCI_CHIP_HASWELL_CRW_B_GT3 0x0D2B
#define PCI_CHIP_HASWELL_CRW_E_GT1 0x0D0E /* Reserved */
#define PCI_CHIP_HASWELL_CRW_E_GT2 0x0D1E
#define PCI_CHIP_HASWELL_CRW_E_GT3 0x0D2E
#define BDW_SPARE 0x2
#define BDW_ULT 0x6
#define BDW_SERVER 0xa
#define BDW_IRIS 0xb
#define BDW_WORKSTATION 0xd
#define BDW_ULX 0xe
#define PCI_CHIP_VALLEYVIEW_PO 0x0f30 /* VLV PO board */
#define PCI_CHIP_VALLEYVIEW_1 0x0f31
#define PCI_CHIP_VALLEYVIEW_2 0x0f32
#define PCI_CHIP_VALLEYVIEW_3 0x0f33
#define PCI_CHIP_CHERRYVIEW_0 0x22b0
#define PCI_CHIP_CHERRYVIEW_1 0x22b1
#define PCI_CHIP_CHERRYVIEW_2 0x22b2
#define PCI_CHIP_CHERRYVIEW_3 0x22b3
#define PCI_CHIP_SKYLAKE_DT_GT1 0x1902
#define PCI_CHIP_SKYLAKE_ULT_GT1 0x1906
#define PCI_CHIP_SKYLAKE_SRV_GT1 0x190A /* Reserved */
#define PCI_CHIP_SKYLAKE_H_GT1 0x190B
#define PCI_CHIP_SKYLAKE_ULX_GT1 0x190E /* Reserved */
#define PCI_CHIP_SKYLAKE_DT_GT2 0x1912
#define PCI_CHIP_SKYLAKE_FUSED0_GT2 0x1913 /* Reserved */
#define PCI_CHIP_SKYLAKE_FUSED1_GT2 0x1915 /* Reserved */
#define PCI_CHIP_SKYLAKE_ULT_GT2 0x1916
#define PCI_CHIP_SKYLAKE_FUSED2_GT2 0x1917 /* Reserved */
#define PCI_CHIP_SKYLAKE_SRV_GT2 0x191A /* Reserved */
#define PCI_CHIP_SKYLAKE_HALO_GT2 0x191B
#define PCI_CHIP_SKYLAKE_WKS_GT2 0x191D
#define PCI_CHIP_SKYLAKE_ULX_GT2 0x191E
#define PCI_CHIP_SKYLAKE_MOBILE_GT2 0x1921 /* Reserved */
#define PCI_CHIP_SKYLAKE_ULT_GT3_0 0x1923
#define PCI_CHIP_SKYLAKE_ULT_GT3_1 0x1926
#define PCI_CHIP_SKYLAKE_ULT_GT3_2 0x1927
#define PCI_CHIP_SKYLAKE_SRV_GT4 0x192A
#define PCI_CHIP_SKYLAKE_HALO_GT3 0x192B /* Reserved */
#define PCI_CHIP_SKYLAKE_SRV_GT3 0x192D
#define PCI_CHIP_SKYLAKE_DT_GT4 0x1932
#define PCI_CHIP_SKYLAKE_SRV_GT4X 0x193A
#define PCI_CHIP_SKYLAKE_H_GT4 0x193B
#define PCI_CHIP_SKYLAKE_WKS_GT4 0x193D
#define PCI_CHIP_KABYLAKE_ULT_GT2 0x5916
#define PCI_CHIP_KABYLAKE_ULT_GT1_5 0x5913
#define PCI_CHIP_KABYLAKE_ULT_GT1 0x5906
#define PCI_CHIP_KABYLAKE_ULT_GT3_0 0x5923
#define PCI_CHIP_KABYLAKE_ULT_GT3_1 0x5926
#define PCI_CHIP_KABYLAKE_ULT_GT3_2 0x5927
#define PCI_CHIP_KABYLAKE_ULT_GT2F 0x5921
#define PCI_CHIP_KABYLAKE_ULX_GT1_5 0x5915
#define PCI_CHIP_KABYLAKE_ULX_GT1 0x590E
#define PCI_CHIP_KABYLAKE_ULX_GT2 0x591E
#define PCI_CHIP_KABYLAKE_DT_GT2 0x5912
#define PCI_CHIP_KABYLAKE_DT_GT1_5 0x5917
#define PCI_CHIP_KABYLAKE_DT_GT1 0x5902
#define PCI_CHIP_KABYLAKE_HALO_GT2 0x591B
#define PCI_CHIP_KABYLAKE_HALO_GT4 0x593B
#define PCI_CHIP_KABYLAKE_HALO_GT1_0 0x5908
#define PCI_CHIP_KABYLAKE_HALO_GT1_1 0x590B
#define PCI_CHIP_KABYLAKE_SRV_GT2 0x591A
#define PCI_CHIP_KABYLAKE_SRV_GT1 0x590A
#define PCI_CHIP_KABYLAKE_WKS_GT2 0x591D
#define PCI_CHIP_BROXTON_0 0x0A84
#define PCI_CHIP_BROXTON_1 0x1A84
#define PCI_CHIP_BROXTON_2 0x5A84
#define PCI_CHIP_BROXTON_3 0x1A85
#define PCI_CHIP_BROXTON_4 0x5A85
#define PCI_CHIP_GLK 0x3184
#define PCI_CHIP_GLK_2X6 0x3185
#define IS_MOBILE(devid) ((devid) == PCI_CHIP_I855_GM || \
(devid) == PCI_CHIP_I915_GM || \
(devid) == PCI_CHIP_I945_GM || \
(devid) == PCI_CHIP_I945_GME || \
(devid) == PCI_CHIP_I965_GM || \
(devid) == PCI_CHIP_I965_GME || \
(devid) == PCI_CHIP_GM45_GM || IS_IGD(devid) || \
(devid) == PCI_CHIP_IVYBRIDGE_M_GT1 || \
(devid) == PCI_CHIP_IVYBRIDGE_M_GT2)
#define IS_G45(devid) ((devid) == PCI_CHIP_IGD_E_G || \
(devid) == PCI_CHIP_Q45_G || \
(devid) == PCI_CHIP_G45_G || \
(devid) == PCI_CHIP_G41_G)
#define IS_GM45(devid) ((devid) == PCI_CHIP_GM45_GM)
#define IS_G4X(devid) (IS_G45(devid) || IS_GM45(devid))
#define IS_ILD(devid) ((devid) == PCI_CHIP_ILD_G)
#define IS_ILM(devid) ((devid) == PCI_CHIP_ILM_G)
#define IS_915(devid) ((devid) == PCI_CHIP_I915_G || \
(devid) == PCI_CHIP_E7221_G || \
(devid) == PCI_CHIP_I915_GM)
#define IS_945GM(devid) ((devid) == PCI_CHIP_I945_GM || \
(devid) == PCI_CHIP_I945_GME)
#define IS_945(devid) ((devid) == PCI_CHIP_I945_G || \
(devid) == PCI_CHIP_I945_GM || \
(devid) == PCI_CHIP_I945_GME || \
IS_G33(devid))
#define IS_G33(devid) ((devid) == PCI_CHIP_G33_G || \
(devid) == PCI_CHIP_Q33_G || \
(devid) == PCI_CHIP_Q35_G || IS_IGD(devid))
#define IS_GEN2(devid) ((devid) == PCI_CHIP_I830_M || \
(devid) == PCI_CHIP_845_G || \
(devid) == PCI_CHIP_I855_GM || \
(devid) == PCI_CHIP_I865_G)
#define IS_GEN3(devid) (IS_945(devid) || IS_915(devid))
#define IS_GEN4(devid) ((devid) == PCI_CHIP_I965_G || \
(devid) == PCI_CHIP_I965_Q || \
(devid) == PCI_CHIP_I965_G_1 || \
(devid) == PCI_CHIP_I965_GM || \
(devid) == PCI_CHIP_I965_GME || \
(devid) == PCI_CHIP_I946_GZ || \
IS_G4X(devid))
#define IS_GEN5(devid) (IS_ILD(devid) || IS_ILM(devid))
#define IS_GEN6(devid) ((devid) == PCI_CHIP_SANDYBRIDGE_GT1 || \
(devid) == PCI_CHIP_SANDYBRIDGE_GT2 || \
(devid) == PCI_CHIP_SANDYBRIDGE_GT2_PLUS || \
(devid) == PCI_CHIP_SANDYBRIDGE_M_GT1 || \
(devid) == PCI_CHIP_SANDYBRIDGE_M_GT2 || \
(devid) == PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS || \
(devid) == PCI_CHIP_SANDYBRIDGE_S)
#define IS_GEN7(devid) (IS_IVYBRIDGE(devid) || \
IS_HASWELL(devid) || \
IS_VALLEYVIEW(devid))
#define IS_IVYBRIDGE(devid) ((devid) == PCI_CHIP_IVYBRIDGE_GT1 || \
(devid) == PCI_CHIP_IVYBRIDGE_GT2 || \
(devid) == PCI_CHIP_IVYBRIDGE_M_GT1 || \
(devid) == PCI_CHIP_IVYBRIDGE_M_GT2 || \
(devid) == PCI_CHIP_IVYBRIDGE_S || \
(devid) == PCI_CHIP_IVYBRIDGE_S_GT2)
#define IS_VALLEYVIEW(devid) ((devid) == PCI_CHIP_VALLEYVIEW_PO || \
(devid) == PCI_CHIP_VALLEYVIEW_1 || \
(devid) == PCI_CHIP_VALLEYVIEW_2 || \
(devid) == PCI_CHIP_VALLEYVIEW_3)
#define IS_HSW_GT1(devid) ((devid) == PCI_CHIP_HASWELL_GT1 || \
(devid) == PCI_CHIP_HASWELL_M_GT1 || \
(devid) == PCI_CHIP_HASWELL_S_GT1 || \
(devid) == PCI_CHIP_HASWELL_B_GT1 || \
(devid) == PCI_CHIP_HASWELL_E_GT1 || \
(devid) == PCI_CHIP_HASWELL_SDV_GT1 || \
(devid) == PCI_CHIP_HASWELL_SDV_M_GT1 || \
(devid) == PCI_CHIP_HASWELL_SDV_S_GT1 || \
(devid) == PCI_CHIP_HASWELL_SDV_B_GT1 || \
(devid) == PCI_CHIP_HASWELL_SDV_E_GT1 || \
(devid) == PCI_CHIP_HASWELL_ULT_GT1 || \
(devid) == PCI_CHIP_HASWELL_ULT_M_GT1 || \
(devid) == PCI_CHIP_HASWELL_ULT_S_GT1 || \
(devid) == PCI_CHIP_HASWELL_ULT_B_GT1 || \
(devid) == PCI_CHIP_HASWELL_ULT_E_GT1 || \
(devid) == PCI_CHIP_HASWELL_CRW_GT1 || \
(devid) == PCI_CHIP_HASWELL_CRW_M_GT1 || \
(devid) == PCI_CHIP_HASWELL_CRW_S_GT1 || \
(devid) == PCI_CHIP_HASWELL_CRW_B_GT1 || \
(devid) == PCI_CHIP_HASWELL_CRW_E_GT1)
#define IS_HSW_GT2(devid) ((devid) == PCI_CHIP_HASWELL_GT2 || \
(devid) == PCI_CHIP_HASWELL_M_GT2 || \
(devid) == PCI_CHIP_HASWELL_S_GT2 || \
(devid) == PCI_CHIP_HASWELL_B_GT2 || \
(devid) == PCI_CHIP_HASWELL_E_GT2 || \
(devid) == PCI_CHIP_HASWELL_SDV_GT2 || \
(devid) == PCI_CHIP_HASWELL_SDV_M_GT2 || \
(devid) == PCI_CHIP_HASWELL_SDV_S_GT2 || \
(devid) == PCI_CHIP_HASWELL_SDV_B_GT2 || \
(devid) == PCI_CHIP_HASWELL_SDV_E_GT2 || \
(devid) == PCI_CHIP_HASWELL_ULT_GT2 || \
(devid) == PCI_CHIP_HASWELL_ULT_M_GT2 || \
(devid) == PCI_CHIP_HASWELL_ULT_S_GT2 || \
(devid) == PCI_CHIP_HASWELL_ULT_B_GT2 || \
(devid) == PCI_CHIP_HASWELL_ULT_E_GT2 || \
(devid) == PCI_CHIP_HASWELL_CRW_GT2 || \
(devid) == PCI_CHIP_HASWELL_CRW_M_GT2 || \
(devid) == PCI_CHIP_HASWELL_CRW_S_GT2 || \
(devid) == PCI_CHIP_HASWELL_CRW_B_GT2 || \
(devid) == PCI_CHIP_HASWELL_CRW_E_GT2)
#define IS_HSW_GT3(devid) ((devid) == PCI_CHIP_HASWELL_GT3 || \
(devid) == PCI_CHIP_HASWELL_M_GT3 || \
(devid) == PCI_CHIP_HASWELL_S_GT3 || \
(devid) == PCI_CHIP_HASWELL_B_GT3 || \
(devid) == PCI_CHIP_HASWELL_E_GT3 || \
(devid) == PCI_CHIP_HASWELL_SDV_GT3 || \
(devid) == PCI_CHIP_HASWELL_SDV_M_GT3 || \
(devid) == PCI_CHIP_HASWELL_SDV_S_GT3 || \
(devid) == PCI_CHIP_HASWELL_SDV_B_GT3 || \
(devid) == PCI_CHIP_HASWELL_SDV_E_GT3 || \
(devid) == PCI_CHIP_HASWELL_ULT_GT3 || \
(devid) == PCI_CHIP_HASWELL_ULT_M_GT3 || \
(devid) == PCI_CHIP_HASWELL_ULT_S_GT3 || \
(devid) == PCI_CHIP_HASWELL_ULT_B_GT3 || \
(devid) == PCI_CHIP_HASWELL_ULT_E_GT3 || \
(devid) == PCI_CHIP_HASWELL_CRW_GT3 || \
(devid) == PCI_CHIP_HASWELL_CRW_M_GT3 || \
(devid) == PCI_CHIP_HASWELL_CRW_S_GT3 || \
(devid) == PCI_CHIP_HASWELL_CRW_B_GT3 || \
(devid) == PCI_CHIP_HASWELL_CRW_E_GT3)
#define IS_HASWELL(devid) (IS_HSW_GT1(devid) || \
IS_HSW_GT2(devid) || \
IS_HSW_GT3(devid))
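/* Broadwell device ids are decoded rather than enumerated: bits 15:8 must
* be 0x16, bits 7:4 encode the GT level (values above 3 are rejected), and
* the low nibble selects the variant (the BDW_* defines above). */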
#define IS_BROADWELL(devid) (((devid & 0xff00) != 0x1600) ? 0 : \
(((devid & 0x00f0) >> 4) > 3) ? 0 : \
((devid & 0x000f) == BDW_SPARE) ? 1 : \
((devid & 0x000f) == BDW_ULT) ? 1 : \
((devid & 0x000f) == BDW_IRIS) ? 1 : \
((devid & 0x000f) == BDW_SERVER) ? 1 : \
((devid & 0x000f) == BDW_WORKSTATION) ? 1 : \
((devid & 0x000f) == BDW_ULX) ? 1 : 0)
#define IS_CHERRYVIEW(devid) ((devid) == PCI_CHIP_CHERRYVIEW_0 || \
(devid) == PCI_CHIP_CHERRYVIEW_1 || \
(devid) == PCI_CHIP_CHERRYVIEW_2 || \
(devid) == PCI_CHIP_CHERRYVIEW_3)
#define IS_GEN8(devid) (IS_BROADWELL(devid) || \
IS_CHERRYVIEW(devid))
#define IS_SKL_GT1(devid) ((devid) == PCI_CHIP_SKYLAKE_DT_GT1 || \
(devid) == PCI_CHIP_SKYLAKE_ULT_GT1 || \
(devid) == PCI_CHIP_SKYLAKE_SRV_GT1 || \
(devid) == PCI_CHIP_SKYLAKE_H_GT1 || \
(devid) == PCI_CHIP_SKYLAKE_ULX_GT1)
#define IS_SKL_GT2(devid) ((devid) == PCI_CHIP_SKYLAKE_DT_GT2 || \
(devid) == PCI_CHIP_SKYLAKE_FUSED0_GT2 || \
(devid) == PCI_CHIP_SKYLAKE_FUSED1_GT2 || \
(devid) == PCI_CHIP_SKYLAKE_ULT_GT2 || \
(devid) == PCI_CHIP_SKYLAKE_FUSED2_GT2 || \
(devid) == PCI_CHIP_SKYLAKE_SRV_GT2 || \
(devid) == PCI_CHIP_SKYLAKE_HALO_GT2 || \
(devid) == PCI_CHIP_SKYLAKE_WKS_GT2 || \
(devid) == PCI_CHIP_SKYLAKE_ULX_GT2 || \
(devid) == PCI_CHIP_SKYLAKE_MOBILE_GT2)
#define IS_SKL_GT3(devid) ((devid) == PCI_CHIP_SKYLAKE_ULT_GT3_0 || \
(devid) == PCI_CHIP_SKYLAKE_ULT_GT3_1 || \
(devid) == PCI_CHIP_SKYLAKE_ULT_GT3_2 || \
(devid) == PCI_CHIP_SKYLAKE_HALO_GT3 || \
(devid) == PCI_CHIP_SKYLAKE_SRV_GT3)
#define IS_SKL_GT4(devid) ((devid) == PCI_CHIP_SKYLAKE_SRV_GT4 || \
(devid) == PCI_CHIP_SKYLAKE_DT_GT4 || \
(devid) == PCI_CHIP_SKYLAKE_SRV_GT4X || \
(devid) == PCI_CHIP_SKYLAKE_H_GT4 || \
(devid) == PCI_CHIP_SKYLAKE_WKS_GT4)
#define IS_KBL_GT1(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT1_5 || \
(devid) == PCI_CHIP_KABYLAKE_ULX_GT1_5 || \
(devid) == PCI_CHIP_KABYLAKE_DT_GT1_5 || \
(devid) == PCI_CHIP_KABYLAKE_ULT_GT1 || \
(devid) == PCI_CHIP_KABYLAKE_ULX_GT1 || \
(devid) == PCI_CHIP_KABYLAKE_DT_GT1 || \
(devid) == PCI_CHIP_KABYLAKE_HALO_GT1_0 || \
(devid) == PCI_CHIP_KABYLAKE_HALO_GT1_1 || \
(devid) == PCI_CHIP_KABYLAKE_SRV_GT1)
#define IS_KBL_GT2(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT2 || \
(devid) == PCI_CHIP_KABYLAKE_ULT_GT2F || \
(devid) == PCI_CHIP_KABYLAKE_ULX_GT2 || \
(devid) == PCI_CHIP_KABYLAKE_DT_GT2 || \
(devid) == PCI_CHIP_KABYLAKE_HALO_GT2 || \
(devid) == PCI_CHIP_KABYLAKE_SRV_GT2 || \
(devid) == PCI_CHIP_KABYLAKE_WKS_GT2)
#define IS_KBL_GT3(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT3_0 || \
(devid) == PCI_CHIP_KABYLAKE_ULT_GT3_1 || \
(devid) == PCI_CHIP_KABYLAKE_ULT_GT3_2)
#define IS_KBL_GT4(devid) ((devid) == PCI_CHIP_KABYLAKE_HALO_GT4)
#define IS_KABYLAKE(devid) (IS_KBL_GT1(devid) || \
IS_KBL_GT2(devid) || \
IS_KBL_GT3(devid) || \
IS_KBL_GT4(devid))
#define IS_SKYLAKE(devid) (IS_SKL_GT1(devid) || \
IS_SKL_GT2(devid) || \
IS_SKL_GT3(devid) || \
IS_SKL_GT4(devid))
#define IS_BROXTON(devid) ((devid) == PCI_CHIP_BROXTON_0 || \
(devid) == PCI_CHIP_BROXTON_1 || \
(devid) == PCI_CHIP_BROXTON_2 || \
(devid) == PCI_CHIP_BROXTON_3 || \
(devid) == PCI_CHIP_BROXTON_4)
#define IS_GEMINILAKE(devid) ((devid) == PCI_CHIP_GLK || \
(devid) == PCI_CHIP_GLK_2X6)
#define IS_GEN9(devid) (IS_SKYLAKE(devid) || \
IS_BROXTON(devid) || \
IS_KABYLAKE(devid) || \
IS_GEMINILAKE(devid))
#define IS_9XX(dev) (IS_GEN3(dev) || \
IS_GEN4(dev) || \
IS_GEN5(dev) || \
IS_GEN6(dev) || \
IS_GEN7(dev) || \
IS_GEN8(dev) || \
IS_GEN9(dev))
#endif /* _INTEL_CHIPSET_H */
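
These predicates are typically collapsed into a single generation lookup when the screen is created. A hypothetical helper, not part of the import:

/* Classify a PCI device id into a hardware generation using the
 * predicates above; returns -1 for an unrecognized device. */
static int
example_gen_from_devid(int devid)
{
	if (IS_GEN9(devid)) return 9;
	if (IS_GEN8(devid)) return 8;
	if (IS_GEN7(devid)) return 7;
	if (IS_GEN6(devid)) return 6;
	if (IS_GEN5(devid)) return 5;
	if (IS_GEN4(devid)) return 4;
	if (IS_GEN3(devid)) return 3;
	if (IS_GEN2(devid)) return 2;
	return -1;
}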


@@ -0,0 +1,118 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* List macros heavily inspired by the Linux kernel
* list handling.
*/
#include <stddef.h>
typedef struct _drmMMListHead
{
struct _drmMMListHead *prev;
struct _drmMMListHead *next;
} drmMMListHead;
#define DRMINITLISTHEAD(__item) \
do{ \
(__item)->prev = (__item); \
(__item)->next = (__item); \
} while (0)
#define DRMLISTADD(__item, __list) \
do { \
(__item)->prev = (__list); \
(__item)->next = (__list)->next; \
(__list)->next->prev = (__item); \
(__list)->next = (__item); \
} while (0)
#define DRMLISTADDTAIL(__item, __list) \
do { \
(__item)->next = (__list); \
(__item)->prev = (__list)->prev; \
(__list)->prev->next = (__item); \
(__list)->prev = (__item); \
} while(0)
#define DRMLISTDEL(__item) \
do { \
(__item)->prev->next = (__item)->next; \
(__item)->next->prev = (__item)->prev; \
} while(0)
#define DRMLISTDELINIT(__item) \
do { \
(__item)->prev->next = (__item)->next; \
(__item)->next->prev = (__item)->prev; \
(__item)->next = (__item); \
(__item)->prev = (__item); \
} while(0)
#define DRMLISTENTRY(__type, __item, __field) \
((__type *)(((char *) (__item)) - offsetof(__type, __field)))
#define DRMLISTEMPTY(__item) ((__item)->next == (__item))
#define DRMLISTSINGLE(__list) \
(!DRMLISTEMPTY(__list) && ((__list)->next == (__list)->prev))
#define DRMLISTFOREACH(__item, __list) \
for ((__item) = (__list)->next; \
(__item) != (__list); (__item) = (__item)->next)
#define DRMLISTFOREACHSAFE(__item, __temp, __list) \
for ((__item) = (__list)->next, (__temp) = (__item)->next; \
(__item) != (__list); \
(__item) = (__temp), (__temp) = (__item)->next)
#define DRMLISTFOREACHSAFEREVERSE(__item, __temp, __list) \
for ((__item) = (__list)->prev, (__temp) = (__item)->prev; \
(__item) != (__list); \
(__item) = (__temp), (__temp) = (__item)->prev)
#define DRMLISTFOREACHENTRY(__item, __list, __head) \
for ((__item) = DRMLISTENTRY(typeof(*__item), (__list)->next, __head); \
&(__item)->__head != (__list); \
(__item) = DRMLISTENTRY(typeof(*__item), \
(__item)->__head.next, __head))
#define DRMLISTFOREACHENTRYSAFE(__item, __temp, __list, __head) \
for ((__item) = DRMLISTENTRY(typeof(*__item), (__list)->next, __head), \
(__temp) = DRMLISTENTRY(typeof(*__item), \
(__item)->__head.next, __head); \
&(__item)->__head != (__list); \
(__item) = (__temp), \
(__temp) = DRMLISTENTRY(typeof(*__item), \
(__temp)->__head.next, __head))
#define DRMLISTJOIN(__list, __join) if (!DRMLISTEMPTY(__list)) { \
(__list)->next->prev = (__join); \
(__list)->prev->next = (__join)->next; \
(__join)->next->prev = (__list)->prev; \
(__join)->next = (__list)->next; \
}
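
A hypothetical usage sketch, since the file itself carries no examples: embed a drmMMListHead in your own struct and walk it with the entry macros, which (like the macros above) rely on the GCC typeof extension.

struct example_node {
	int value;
	drmMMListHead link;
};

/* Sum the values of every node on `list`. */
static int
example_sum(drmMMListHead *list)
{
	struct example_node *node;
	int sum = 0;

	DRMLISTFOREACHENTRY(node, list, link)
		sum += node->value;
	return sum;
}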


@@ -0,0 +1,87 @@
/*
* Copyright © 2014 NVIDIA Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef LIBDRM_LIBDRM_H
#define LIBDRM_LIBDRM_H
#if defined(HAVE_VISIBILITY)
# define drm_private __attribute__((visibility("hidden")))
#else
# define drm_private
#endif
/**
* Static (compile-time) assertion.
* Basically, use COND to dimension an array. If COND is false/zero the
* array size will be -1 and we'll get a compilation error.
*/
#define STATIC_ASSERT(COND) \
do { \
(void) sizeof(char [1 - 2*!(COND)]); \
} while (0)
#include <sys/mman.h>
#if defined(ANDROID) && !defined(__LP64__)
#include <errno.h> /* for EINVAL */
extern void *__mmap2(void *, size_t, int, int, int, size_t);
static inline void *drm_mmap(void *addr, size_t length, int prot, int flags,
int fd, loff_t offset)
{
/* offset must be aligned to 4096 (not necessarily the page size) */
if (offset & 4095) {
errno = EINVAL;
return MAP_FAILED;
}
return __mmap2(addr, length, prot, flags, fd, (size_t) (offset >> 12));
}
# define drm_munmap(addr, length) \
munmap(addr, length)
#else
/* assume large file support exists */
# define drm_mmap(addr, length, prot, flags, fd, offset) \
mmap(addr, length, prot, flags, fd, offset)
static inline int drm_munmap(void *addr, size_t length)
{
/* Copied from configure code generated by AC_SYS_LARGEFILE */
#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + \
(((off_t) 1 << 31) << 31))
STATIC_ASSERT(LARGE_OFF_T % 2147483629 == 721 &&
LARGE_OFF_T % 2147483647 == 1);
#undef LARGE_OFF_T
return munmap(addr, length);
}
#endif
#endif
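
One hedged example of the STATIC_ASSERT idiom defined above; when the condition holds, the sizeof expression compiles away entirely.

/* Not part of the import: compilation fails if the condition is false,
 * because the array would be dimensioned with a negative size. */
static inline void
example_check(void)
{
	STATIC_ASSERT(sizeof(void *) >= sizeof(int));
}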

File diff suppressed because it is too large.


@@ -0,0 +1,117 @@
/*
* Copyright © 2009 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
/**
* @file xf86atomic.h
*
* Private definitions for atomic operations
*/
#ifndef LIBDRM_ATOMICS_H
#define LIBDRM_ATOMICS_H
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#if HAVE_LIBDRM_ATOMIC_PRIMITIVES
#define HAS_ATOMIC_OPS 1
typedef struct {
int atomic;
} atomic_t;
# define atomic_read(x) ((x)->atomic)
# define atomic_set(x, val) ((x)->atomic = (val))
# define atomic_inc(x) ((void) __sync_fetch_and_add (&(x)->atomic, 1))
# define atomic_inc_return(x) (__sync_add_and_fetch (&(x)->atomic, 1))
# define atomic_dec_and_test(x) (__sync_add_and_fetch (&(x)->atomic, -1) == 0)
# define atomic_add(x, v) ((void) __sync_add_and_fetch(&(x)->atomic, (v)))
# define atomic_dec(x, v) ((void) __sync_sub_and_fetch(&(x)->atomic, (v)))
# define atomic_cmpxchg(x, oldv, newv) __sync_val_compare_and_swap (&(x)->atomic, oldv, newv)
#endif
#if HAVE_LIB_ATOMIC_OPS
#include <atomic_ops.h>
#define HAS_ATOMIC_OPS 1
typedef struct {
AO_t atomic;
} atomic_t;
# define atomic_read(x) AO_load_full(&(x)->atomic)
# define atomic_set(x, val) AO_store_full(&(x)->atomic, (val))
# define atomic_inc(x) ((void) AO_fetch_and_add1_full(&(x)->atomic))
# define atomic_inc_return(x) (AO_fetch_and_add1_full(&(x)->atomic) + 1)
# define atomic_add(x, v) ((void) AO_fetch_and_add_full(&(x)->atomic, (v)))
# define atomic_dec(x, v) ((void) AO_fetch_and_add_full(&(x)->atomic, -(v)))
# define atomic_dec_and_test(x) (AO_fetch_and_sub1_full(&(x)->atomic) == 1)
# define atomic_cmpxchg(x, oldv, newv) AO_compare_and_swap_full(&(x)->atomic, oldv, newv)
#endif
#if (defined(__sun) || defined(__NetBSD__)) && !defined(HAS_ATOMIC_OPS) /* Solaris & OpenSolaris & NetBSD */
#include <sys/atomic.h>
#define HAS_ATOMIC_OPS 1
#if defined(__NetBSD__)
#define LIBDRM_ATOMIC_TYPE int
#else
#define LIBDRM_ATOMIC_TYPE uint_t
#endif
typedef struct { LIBDRM_ATOMIC_TYPE atomic; } atomic_t;
# define atomic_read(x) (int) ((x)->atomic)
# define atomic_set(x, val) ((x)->atomic = (LIBDRM_ATOMIC_TYPE)(val))
# define atomic_inc(x) (atomic_inc_uint (&(x)->atomic))
# define atomic_inc_return(x) (atomic_inc_uint_nv(&(x)->atomic))
# define atomic_dec_and_test(x) (atomic_dec_uint_nv(&(x)->atomic) == 0)
# define atomic_add(x, v) (atomic_add_int(&(x)->atomic, (v)))
# define atomic_dec(x, v) (atomic_add_int(&(x)->atomic, -(v)))
# define atomic_cmpxchg(x, oldv, newv) atomic_cas_uint (&(x)->atomic, oldv, newv)
#endif
#if ! HAS_ATOMIC_OPS
#error libdrm requires atomic operations, please define them for your CPU/compiler.
#endif
static inline int atomic_add_unless(atomic_t *v, int add, int unless)
{
int c, old;
c = atomic_read(v);
while (c != unless && (old = atomic_cmpxchg(v, c, c + add)) != c)
c = old;
return c == unless;
}
#endif
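
To show how these primitives compose — and to flag the easy-to-miss return convention of atomic_add_unless, which returns nonzero when the value already equals `unless` and no add was performed — here is a minimal, hypothetical refcount sketch, not part of the import:

struct example_obj {
	atomic_t refcount;
};

static void
example_init(struct example_obj *obj)
{
	atomic_set(&obj->refcount, 1);
}

static void
example_get(struct example_obj *obj)
{
	atomic_inc(&obj->refcount);
}

static void
example_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount)) {
		/* last reference dropped: free obj here */
	}
}

/* Take a reference only if the object is still live (count != 0):
 * atomic_add_unless returns nonzero when no add was performed,
 * so invert the result. */
static int
example_try_get(struct example_obj *obj)
{
	return !atomic_add_unless(&obj->refcount, 1, 0);
}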