winsys/amdgpu: add a new winsys for the new kernel driver

v2: - lots of changes according to Emil Velikov's comments
    - implemented radeon_winsys::read_registers

v3: - a lot of new work; many of the squashed patches adapt to libdrm interface changes
Squashed patches:
winsys/amdgpu: implement radeon_winsys context support
winsys/amdgpu: add reference counting for contexts
winsys/amdgpu: add userptr support
winsys/amdgpu: allocate IBs like normal buffers
winsys/amdgpu: add IBs to the buffer list, adapt to interface changes
winsys/amdgpu: don't use KMS handles as reloc hash keys
winsys/amdgpu: sync buffer accesses to different rings
winsys/amdgpu: use dependencies instead of waiting for last fence v2
gallium/radeon: unify buffer_wait and buffer_is_busy in the winsys interface (amdgpu part)
winsys/amdgpu: track fences per ring and be thread-safe
winsys/amdgpu: simplify waiting on a variable in amdgpu_fence_wait
gallium/radeon: allow the winsys to choose the IB size (amdgpu part)
winsys/amdgpu: switch to new amdgpu_cs_query_fence_status interface
winsys/amdgpu: handle fence and dependencies merge
winsys/amdgpu: follow libdrm change to move the user fence into the UMD
winsys/amdgpu: use amdgpu_bo_va_op for va map/unmap v2
winsys/amdgpu: use the new tiling flags
winsys/amdgpu: switch to new GTT_USWC definition
winsys/amdgpu: expose amdgpu_cs_query_reset_state to drivers
winsys/amdgpu: fix valgrind warnings
winsys/amdgpu: don't use VRAM with APUs that don't have much of it
winsys/amdgpu: require LLVM 3.6.1 for VI because of bug fixes there
winsys/amdgpu: remove amdgpu_winsys::num_cpus
winsys/amdgpu: align BO size to page size
winsys/amdgpu: reduce BO cache timeout
winsys/amdgpu: remove useless flushing and waiting in amdgpu_bo_set_tiling
winsys/amdgpu: use amdgpu_device_handle as a unique device ID instead of fd
winsys/amdgpu: use safer access to amdgpu_fence_wait::signalled
winsys/amdgpu: allow maximum IB size of 4 MB
winsys/amdgpu: add ip_instance into amdgpu_fence
gallium/radeon: add RING_COMPUTE instead of RADEON_FLUSH_COMPUTE
winsys/amdgpu: set the ring type at CS initialization
winsys/amdgpu: query the GART page size from the kernel
winsys/amdgpu: correctly wait for shared buffers to become idle
winsys/amdgpu: set the amdgpu_cs_fence structure only once at fence creation
winsys/amdgpu: add a specific error message for cs_submit -> -ENOMEM
winsys/amdgpu: check num_active_ioctls before calling amdgpu_bo_wait_for_idle
winsys/amdgpu: clear user fence BO after allocating it
winsys/amdgpu: fix user fences
winsys/amdgpu: make amdgpu_winsys_create public
winsys/amdgpu: remove thread offloading
winsys/amdgpu: flatten the amdgpu_cs_context structure and simplify more

v4: require libdrm 2.4.63
Author: Marek Olšák
Date:   2015-04-16 22:43:23 +02:00
Parent: 5609a6986f
Commit: 2eb067db0f
22 changed files with 2382 additions and 8 deletions


@@ -70,6 +70,7 @@ AC_SUBST([OPENCL_VERSION])
dnl Versions for external dependencies
LIBDRM_REQUIRED=2.4.60
LIBDRM_RADEON_REQUIRED=2.4.56
LIBDRM_AMDGPU_REQUIRED=2.4.63
LIBDRM_INTEL_REQUIRED=2.4.61
LIBDRM_NVVIEUX_REQUIRED=2.4.33
LIBDRM_NOUVEAU_REQUIRED=2.4.62
@@ -2105,6 +2106,7 @@ if test -n "$with_gallium_drivers"; then
xradeonsi)
HAVE_GALLIUM_RADEONSI=yes
PKG_CHECK_MODULES([RADEON], [libdrm_radeon >= $LIBDRM_RADEON_REQUIRED])
PKG_CHECK_MODULES([AMDGPU], [libdrm_amdgpu >= $LIBDRM_AMDGPU_REQUIRED])
gallium_require_drm "radeonsi"
gallium_require_drm_loader
radeon_llvm_check "radeonsi"
@@ -2357,6 +2359,7 @@ AC_CONFIG_FILES([Makefile
src/gallium/winsys/intel/drm/Makefile
src/gallium/winsys/nouveau/drm/Makefile
src/gallium/winsys/radeon/drm/Makefile
src/gallium/winsys/amdgpu/drm/Makefile
src/gallium/winsys/svga/drm/Makefile
src/gallium/winsys/sw/dri/Makefile
src/gallium/winsys/sw/kms-dri/Makefile


@@ -72,6 +72,7 @@ SUBDIRS += drivers/r600
endif
ifneq ($(filter radeonsi, $(MESA_GPU_DRIVERS)),)
SUBDIRS += drivers/radeonsi
SUBDIRS += winsys/amdgpu/drm
endif
endif
endif


@@ -58,6 +58,7 @@ endif
## radeonsi
if HAVE_GALLIUM_RADEONSI
SUBDIRS += drivers/radeonsi
SUBDIRS += winsys/amdgpu/drm
endif
## the radeon winsys - linked in by r300, r600 and radeonsi


@@ -42,6 +42,7 @@
#if GALLIUM_RADEONSI
#include "radeon/radeon_winsys.h"
#include "radeon/drm/radeon_drm_public.h"
#include "amdgpu/drm/amdgpu_public.h"
#include "radeonsi/si_public.h"
#endif
@@ -228,7 +229,12 @@ pipe_radeonsi_create_screen(int fd)
{
struct radeon_winsys *rw;
rw = radeon_drm_winsys_create(fd, radeonsi_screen_create);
/* First, try amdgpu. */
rw = amdgpu_winsys_create(fd, radeonsi_screen_create);
if (!rw)
rw = radeon_drm_winsys_create(fd, radeonsi_screen_create);
return rw ? debug_screen_wrap(rw->screen) : NULL;
}
#endif


@@ -44,8 +44,7 @@
#define RADEON_FLUSH_ASYNC (1 << 0)
#define RADEON_FLUSH_KEEP_TILING_FLAGS (1 << 1) /* needs DRM 2.12.0 */
#define RADEON_FLUSH_COMPUTE (1 << 2)
#define RADEON_FLUSH_END_OF_FRAME (1 << 3)
#define RADEON_FLUSH_END_OF_FRAME (1 << 2)
/* Tiling flags. */
enum radeon_bo_layout {
@@ -134,6 +133,9 @@ enum radeon_family {
CHIP_KABINI,
CHIP_HAWAII,
CHIP_MULLINS,
CHIP_TONGA,
CHIP_ICELAND,
CHIP_CARRIZO,
CHIP_LAST,
};
@@ -148,10 +150,12 @@ enum chip_class {
CAYMAN,
SI,
CIK,
VI,
};
enum ring_type {
RING_GFX = 0,
RING_COMPUTE,
RING_DMA,
RING_UVD,
RING_VCE,
@@ -517,6 +521,11 @@ struct radeon_winsys {
*/
void (*ctx_destroy)(struct radeon_winsys_ctx *ctx);
/**
* Query a GPU reset status.
*/
enum pipe_reset_status (*ctx_query_reset_status)(struct radeon_winsys_ctx *ctx);
/**
* Create a command stream.
*


@@ -5,10 +5,12 @@ TARGET_CPPFLAGS += -DGALLIUM_RADEONSI
TARGET_LIB_DEPS += \
$(top_builddir)/src/gallium/drivers/radeonsi/libradeonsi.la \
$(RADEON_LIBS) \
$(LIBDRM_LIBS)
$(LIBDRM_LIBS) \
$(AMDGPU_LIBS)
TARGET_RADEON_WINSYS = \
$(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la
$(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
$(top_builddir)/src/gallium/winsys/amdgpu/drm/libamdgpuwinsys.la
TARGET_RADEON_COMMON = \
$(top_builddir)/src/gallium/drivers/radeon/libradeon.la


@@ -1,4 +1,5 @@
{
nouveau_drm_screen_create;
radeon_drm_winsys_create;
amdgpu_winsys_create;
};


@@ -4,6 +4,7 @@
__driDriverGetExtensions*;
nouveau_drm_screen_create;
radeon_drm_winsys_create;
amdgpu_winsys_create;
local:
*;
};


@@ -155,10 +155,12 @@ nodist_EXTRA_pipe_radeonsi_la_SOURCES = dummy.cpp
pipe_radeonsi_la_LIBADD = \
$(PIPE_LIBS) \
$(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
$(top_builddir)/src/gallium/winsys/amdgpu/drm/libamdgpuwinsys.la \
$(top_builddir)/src/gallium/drivers/radeon/libradeon.la \
$(top_builddir)/src/gallium/drivers/radeonsi/libradeonsi.la \
$(LIBDRM_LIBS) \
$(RADEON_LIBS)
$(RADEON_LIBS) \
$(AMDGPU_LIBS)
endif


@@ -2,6 +2,7 @@
#include "target-helpers/inline_debug_helper.h"
#include "radeon/drm/radeon_drm_public.h"
#include "radeon/radeon_winsys.h"
#include "amdgpu/drm/amdgpu_public.h"
#include "radeonsi/si_public.h"
static struct pipe_screen *
@@ -9,7 +10,12 @@ create_screen(int fd)
{
struct radeon_winsys *rw;
rw = radeon_drm_winsys_create(fd, radeonsi_screen_create);
/* First, try amdgpu. */
rw = amdgpu_winsys_create(fd, radeonsi_screen_create);
if (!rw)
rw = radeon_drm_winsys_create(fd, radeonsi_screen_create);
return rw ? debug_screen_wrap(rw->screen) : NULL;
}


@@ -3,6 +3,7 @@
vdp_imp_device_create_x11;
nouveau_drm_screen_create;
radeon_drm_winsys_create;
amdgpu_winsys_create;
local:
*;
};


@@ -0,0 +1,37 @@
# Mesa 3-D graphics library
#
# Copyright (C) 2011 Chia-I Wu <olvaffe@gmail.com>
# Copyright (C) 2011 LunarG Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
LOCAL_PATH := $(call my-dir)
# get C_SOURCES
include $(LOCAL_PATH)/Makefile.sources
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(C_SOURCES)
LOCAL_SHARED_LIBRARIES := libdrm libdrm_amdgpu
LOCAL_MODULE := libmesa_winsys_amdgpu
include $(GALLIUM_COMMON_MK)
include $(BUILD_STATIC_LIBRARY)


@@ -0,0 +1,12 @@
include Makefile.sources
include $(top_srcdir)/src/gallium/Automake.inc
AM_CFLAGS = \
$(GALLIUM_WINSYS_CFLAGS) \
$(AMDGPU_CFLAGS)
AM_CXXFLAGS = $(AM_CFLAGS)
noinst_LTLIBRARIES = libamdgpuwinsys.la
libamdgpuwinsys_la_SOURCES = $(C_SOURCES)


@@ -0,0 +1,8 @@
C_SOURCES := \
amdgpu_bo.c \
amdgpu_bo.h \
amdgpu_cs.c \
amdgpu_cs.h \
amdgpu_public.h \
amdgpu_winsys.c \
amdgpu_winsys.h


@@ -0,0 +1,778 @@
/*
* Copyright © 2011 Marek Olšák <maraeo@gmail.com>
* Copyright © 2015 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Marek Olšák <maraeo@gmail.com>
*/
#include "amdgpu_cs.h"
#include "os/os_time.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
static const struct pb_vtbl amdgpu_winsys_bo_vtbl;
static inline struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
{
assert(bo->vtbl == &amdgpu_winsys_bo_vtbl);
return (struct amdgpu_winsys_bo *)bo;
}
struct amdgpu_bomgr {
struct pb_manager base;
struct amdgpu_winsys *rws;
};
static struct amdgpu_winsys *get_winsys(struct pb_manager *mgr)
{
return ((struct amdgpu_bomgr*)mgr)->rws;
}
static struct amdgpu_winsys_bo *get_amdgpu_winsys_bo(struct pb_buffer *_buf)
{
struct amdgpu_winsys_bo *bo = NULL;
if (_buf->vtbl == &amdgpu_winsys_bo_vtbl) {
bo = amdgpu_winsys_bo(_buf);
} else {
struct pb_buffer *base_buf;
pb_size offset;
pb_get_base_buffer(_buf, &base_buf, &offset);
if (base_buf->vtbl == &amdgpu_winsys_bo_vtbl)
bo = amdgpu_winsys_bo(base_buf);
}
return bo;
}
static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
enum radeon_bo_usage usage)
{
struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
struct amdgpu_winsys *ws = bo->rws;
int i;
if (bo->is_shared) {
/* We can't use user fences for shared buffers, because user fences
* are local to this process only. If we want to wait for all buffer
* uses in all processes, we have to use amdgpu_bo_wait_for_idle.
*/
bool buffer_busy = true;
int r;
r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
if (r)
fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
r);
return !buffer_busy;
}
if (timeout == 0) {
/* Timeout == 0 means poll: check each fence once without blocking. */
pipe_mutex_lock(ws->bo_fence_lock);
for (i = 0; i < RING_LAST; i++)
if (bo->fence[i]) {
if (amdgpu_fence_wait(bo->fence[i], 0, false)) {
/* Release the idle fence to avoid checking it again later. */
amdgpu_fence_reference(&bo->fence[i], NULL);
} else {
pipe_mutex_unlock(ws->bo_fence_lock);
return false;
}
}
pipe_mutex_unlock(ws->bo_fence_lock);
return true;
} else {
struct pipe_fence_handle *fence[RING_LAST] = {};
bool fence_idle[RING_LAST] = {};
bool buffer_idle = true;
int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
/* Take references to all fences, so that we can wait for them
* without the lock. */
pipe_mutex_lock(ws->bo_fence_lock);
for (i = 0; i < RING_LAST; i++)
amdgpu_fence_reference(&fence[i], bo->fence[i]);
pipe_mutex_unlock(ws->bo_fence_lock);
/* Now wait for the fences. */
for (i = 0; i < RING_LAST; i++) {
if (fence[i]) {
if (amdgpu_fence_wait(fence[i], abs_timeout, true))
fence_idle[i] = true;
else
buffer_idle = false;
}
}
/* Release idle fences to avoid checking them again later. */
pipe_mutex_lock(ws->bo_fence_lock);
for (i = 0; i < RING_LAST; i++) {
if (fence[i] == bo->fence[i] && fence_idle[i])
amdgpu_fence_reference(&bo->fence[i], NULL);
amdgpu_fence_reference(&fence[i], NULL);
}
pipe_mutex_unlock(ws->bo_fence_lock);
return buffer_idle;
}
}
static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
struct radeon_winsys_cs_handle *buf)
{
return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}
static void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
int i;
amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
amdgpu_va_range_free(bo->va_handle);
amdgpu_bo_free(bo->bo);
for (i = 0; i < RING_LAST; i++)
amdgpu_fence_reference(&bo->fence[i], NULL);
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
bo->rws->allocated_vram -= align(bo->base.size, bo->rws->gart_page_size);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
bo->rws->allocated_gtt -= align(bo->base.size, bo->rws->gart_page_size);
FREE(bo);
}
static void *amdgpu_bo_map(struct radeon_winsys_cs_handle *buf,
struct radeon_winsys_cs *rcs,
enum pipe_transfer_usage usage)
{
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
int r;
void *cpu = NULL;
/* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
/* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
if (usage & PIPE_TRANSFER_DONTBLOCK) {
if (!(usage & PIPE_TRANSFER_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
* if the GPU is using the buffer for read too
* (neither one is changing it).
*
* Only check whether the buffer is being used for write. */
if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
RADEON_USAGE_WRITE)) {
cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
return NULL;
}
if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
RADEON_USAGE_WRITE)) {
return NULL;
}
} else {
if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
return NULL;
}
if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
RADEON_USAGE_READWRITE)) {
return NULL;
}
}
} else {
uint64_t time = os_time_get_nano();
if (!(usage & PIPE_TRANSFER_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
* if the GPU is using the buffer for read too
* (neither one is changing it).
*
* Only check whether the buffer is being used for write. */
if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
RADEON_USAGE_WRITE)) {
cs->flush_cs(cs->flush_data, 0, NULL);
}
amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
RADEON_USAGE_WRITE);
} else {
/* Mapping for write. */
if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo))
cs->flush_cs(cs->flush_data, 0, NULL);
amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
RADEON_USAGE_READWRITE);
}
bo->rws->buffer_wait_time += os_time_get_nano() - time;
}
}
/* If the buffer is created from user memory, return the user pointer. */
if (bo->user_ptr)
return bo->user_ptr;
r = amdgpu_bo_cpu_map(bo->bo, &cpu);
return r ? NULL : cpu;
}
static void amdgpu_bo_unmap(struct radeon_winsys_cs_handle *buf)
{
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
amdgpu_bo_cpu_unmap(bo->bo);
}
static void amdgpu_bo_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
unsigned *offset)
{
*base_buf = buf;
*offset = 0;
}
static enum pipe_error amdgpu_bo_validate(struct pb_buffer *_buf,
struct pb_validate *vl,
unsigned flags)
{
/* Always pinned */
return PIPE_OK;
}
static void amdgpu_bo_fence(struct pb_buffer *buf,
struct pipe_fence_handle *fence)
{
}
static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
amdgpu_bo_destroy,
NULL, /* never called */
NULL, /* never called */
amdgpu_bo_validate,
amdgpu_bo_fence,
amdgpu_bo_get_base_buffer,
};
static struct pb_buffer *amdgpu_bomgr_create_bo(struct pb_manager *_mgr,
pb_size size,
const struct pb_desc *desc)
{
struct amdgpu_winsys *rws = get_winsys(_mgr);
struct amdgpu_bo_desc *rdesc = (struct amdgpu_bo_desc*)desc;
struct amdgpu_bo_alloc_request request = {0};
amdgpu_bo_handle buf_handle;
uint64_t va = 0;
struct amdgpu_winsys_bo *bo;
amdgpu_va_handle va_handle;
int r;
assert(rdesc->initial_domain & RADEON_DOMAIN_VRAM_GTT);
bo = CALLOC_STRUCT(amdgpu_winsys_bo);
if (!bo) {
return NULL;
}
request.alloc_size = size;
request.phys_alignment = desc->alignment;
if (rdesc->initial_domain & RADEON_DOMAIN_VRAM) {
request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
if (rdesc->flags & RADEON_FLAG_CPU_ACCESS)
request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
}
if (rdesc->initial_domain & RADEON_DOMAIN_GTT) {
request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
if (rdesc->flags & RADEON_FLAG_GTT_WC)
request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
}
r = amdgpu_bo_alloc(rws->dev, &request, &buf_handle);
if (r) {
fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
fprintf(stderr, "amdgpu: size : %d bytes\n", size);
fprintf(stderr, "amdgpu: alignment : %d bytes\n", desc->alignment);
fprintf(stderr, "amdgpu: domains : %d\n", rdesc->initial_domain);
goto error_bo_alloc;
}
r = amdgpu_va_range_alloc(rws->dev, amdgpu_gpu_va_range_general,
size, desc->alignment, 0, &va, &va_handle, 0);
if (r)
goto error_va_alloc;
r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
if (r)
goto error_va_map;
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = desc->alignment;
bo->base.usage = desc->usage;
bo->base.size = size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
bo->rws = rws;
bo->bo = buf_handle;
bo->va = va;
bo->va_handle = va_handle;
bo->initial_domain = rdesc->initial_domain;
bo->unique_id = __sync_fetch_and_add(&rws->next_bo_unique_id, 1);
if (rdesc->initial_domain & RADEON_DOMAIN_VRAM)
rws->allocated_vram += align(size, rws->gart_page_size);
else if (rdesc->initial_domain & RADEON_DOMAIN_GTT)
rws->allocated_gtt += align(size, rws->gart_page_size);
return &bo->base;
error_va_map:
amdgpu_va_range_free(va_handle);
error_va_alloc:
amdgpu_bo_free(buf_handle);
error_bo_alloc:
FREE(bo);
return NULL;
}
static void amdgpu_bomgr_flush(struct pb_manager *mgr)
{
/* NOP */
}
/* This is for the cache bufmgr. */
static boolean amdgpu_bomgr_is_buffer_busy(struct pb_manager *_mgr,
struct pb_buffer *_buf)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
return TRUE;
}
if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0, RADEON_USAGE_READWRITE)) {
return TRUE;
}
return FALSE;
}
static void amdgpu_bomgr_destroy(struct pb_manager *mgr)
{
FREE(mgr);
}
struct pb_manager *amdgpu_bomgr_create(struct amdgpu_winsys *rws)
{
struct amdgpu_bomgr *mgr;
mgr = CALLOC_STRUCT(amdgpu_bomgr);
if (!mgr)
return NULL;
mgr->base.destroy = amdgpu_bomgr_destroy;
mgr->base.create_buffer = amdgpu_bomgr_create_bo;
mgr->base.flush = amdgpu_bomgr_flush;
mgr->base.is_buffer_busy = amdgpu_bomgr_is_buffer_busy;
mgr->rws = rws;
return &mgr->base;
}
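/* TILE_SPLIT is stored in the tiling flags as a 3-bit encoding (0-6); these
 * two helpers convert between that encoding and the split size in bytes. */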
static unsigned eg_tile_split(unsigned tile_split)
{
switch (tile_split) {
case 0: tile_split = 64; break;
case 1: tile_split = 128; break;
case 2: tile_split = 256; break;
case 3: tile_split = 512; break;
default:
case 4: tile_split = 1024; break;
case 5: tile_split = 2048; break;
case 6: tile_split = 4096; break;
}
return tile_split;
}
static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
switch (eg_tile_split) {
case 64: return 0;
case 128: return 1;
case 256: return 2;
case 512: return 3;
default:
case 1024: return 4;
case 2048: return 5;
case 4096: return 6;
}
}
static void amdgpu_bo_get_tiling(struct pb_buffer *_buf,
enum radeon_bo_layout *microtiled,
enum radeon_bo_layout *macrotiled,
unsigned *bankw, unsigned *bankh,
unsigned *tile_split,
unsigned *stencil_tile_split,
unsigned *mtilea,
bool *scanout)
{
struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
struct amdgpu_bo_info info = {0};
uint32_t tiling_flags;
int r;
r = amdgpu_bo_query_info(bo->bo, &info);
if (r)
return;
tiling_flags = info.metadata.tiling_info;
*microtiled = RADEON_LAYOUT_LINEAR;
*macrotiled = RADEON_LAYOUT_LINEAR;
if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
*macrotiled = RADEON_LAYOUT_TILED;
else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
*microtiled = RADEON_LAYOUT_TILED;
if (bankw && bankh && tile_split && mtilea) {
*bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
*bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
*tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
*mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
}
if (scanout)
*scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
}
static void amdgpu_bo_set_tiling(struct pb_buffer *_buf,
struct radeon_winsys_cs *rcs,
enum radeon_bo_layout microtiled,
enum radeon_bo_layout macrotiled,
unsigned bankw, unsigned bankh,
unsigned tile_split,
unsigned stencil_tile_split,
unsigned mtilea,
uint32_t pitch,
bool scanout)
{
struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
struct amdgpu_bo_metadata metadata = {0};
uint32_t tiling_flags = 0;
if (macrotiled == RADEON_LAYOUT_TILED)
tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
else if (microtiled == RADEON_LAYOUT_TILED)
tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
else
tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(bankw));
tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(bankh));
if (tile_split)
tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(tile_split));
tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(mtilea));
if (scanout)
tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
else
tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
metadata.tiling_info = tiling_flags;
amdgpu_bo_set_metadata(bo->bo, &metadata);
}
static struct radeon_winsys_cs_handle *amdgpu_get_cs_handle(struct pb_buffer *_buf)
{
/* return a direct pointer to amdgpu_winsys_bo. */
return (struct radeon_winsys_cs_handle*)get_amdgpu_winsys_bo(_buf);
}
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
unsigned size,
unsigned alignment,
boolean use_reusable_pool,
enum radeon_bo_domain domain,
enum radeon_bo_flag flags)
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
struct amdgpu_bo_desc desc;
struct pb_manager *provider;
struct pb_buffer *buffer;
/* Don't use VRAM if the GPU doesn't have much. This is only the initial
* domain. The kernel is free to move the buffer if it wants to.
*
* 64MB means no VRAM by today's standards.
*/
if (domain & RADEON_DOMAIN_VRAM && ws->info.vram_size <= 64*1024*1024) {
domain = RADEON_DOMAIN_GTT;
flags = RADEON_FLAG_GTT_WC;
}
memset(&desc, 0, sizeof(desc));
desc.base.alignment = alignment;
/* Align size to page size. This is the minimum alignment for normal
* BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
* like constant/uniform buffers, can benefit from better and more reuse.
*/
size = align(size, ws->gart_page_size);
/* Only set one usage bit each for domains and flags, or the cache manager
* might consider different sets of domains / flags compatible
*/
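/* With RADEON_DOMAIN_GTT = 2 and RADEON_DOMAIN_VRAM = 4, the shift below
 * maps GTT to bit 0 and VRAM to bit 1; VRAM_GTT gets its own bit 2, and
 * each combination of flags gets its own bit starting at bit 3. */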
if (domain == RADEON_DOMAIN_VRAM_GTT)
desc.base.usage = 1 << 2;
else
desc.base.usage = domain >> 1;
assert(flags < sizeof(desc.base.usage) * 8 - 3);
desc.base.usage |= 1 << (flags + 3);
desc.initial_domain = domain;
desc.flags = flags;
/* Assign a buffer manager. */
if (use_reusable_pool)
provider = ws->cman;
else
provider = ws->kman;
buffer = provider->create_buffer(provider, size, &desc.base);
if (!buffer)
return NULL;
return (struct pb_buffer*)buffer;
}
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
struct winsys_handle *whandle,
unsigned *stride)
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
struct amdgpu_winsys_bo *bo;
enum amdgpu_bo_handle_type type;
struct amdgpu_bo_import_result result = {0};
uint64_t va;
amdgpu_va_handle va_handle;
struct amdgpu_bo_info info = {0};
enum radeon_bo_domain initial = 0;
int r;
/* Initialize the structure. */
bo = CALLOC_STRUCT(amdgpu_winsys_bo);
if (!bo) {
return NULL;
}
switch (whandle->type) {
case DRM_API_HANDLE_TYPE_SHARED:
type = amdgpu_bo_handle_type_gem_flink_name;
break;
case DRM_API_HANDLE_TYPE_FD:
type = amdgpu_bo_handle_type_dma_buf_fd;
break;
default:
return NULL;
}
r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
if (r)
goto error;
/* Get initial domains. */
r = amdgpu_bo_query_info(result.buf_handle, &info);
if (r)
goto error_query;
r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
if (r)
goto error_query;
r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
if (r)
goto error_va_map;
if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
initial |= RADEON_DOMAIN_VRAM;
if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
initial |= RADEON_DOMAIN_GTT;
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = info.phys_alignment;
bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
bo->bo = result.buf_handle;
bo->base.size = result.alloc_size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
bo->rws = ws;
bo->va = va;
bo->va_handle = va_handle;
bo->initial_domain = initial;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
bo->is_shared = true;
if (stride)
*stride = whandle->stride;
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
ws->allocated_vram += align(bo->base.size, ws->gart_page_size);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);
return &bo->base;
error_va_map:
amdgpu_va_range_free(va_handle);
error_query:
amdgpu_bo_free(result.buf_handle);
error:
FREE(bo);
return NULL;
}
static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
unsigned stride,
struct winsys_handle *whandle)
{
struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(buffer);
enum amdgpu_bo_handle_type type;
int r;
switch (whandle->type) {
case DRM_API_HANDLE_TYPE_SHARED:
type = amdgpu_bo_handle_type_gem_flink_name;
break;
case DRM_API_HANDLE_TYPE_FD:
type = amdgpu_bo_handle_type_dma_buf_fd;
break;
case DRM_API_HANDLE_TYPE_KMS:
type = amdgpu_bo_handle_type_kms;
break;
default:
return FALSE;
}
r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
if (r)
return FALSE;
whandle->stride = stride;
bo->is_shared = true;
return TRUE;
}
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
void *pointer, unsigned size)
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
amdgpu_bo_handle buf_handle;
struct amdgpu_winsys_bo *bo;
uint64_t va;
amdgpu_va_handle va_handle;
bo = CALLOC_STRUCT(amdgpu_winsys_bo);
if (!bo)
return NULL;
if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
goto error;
if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
size, 1 << 12, 0, &va, &va_handle, 0))
goto error_va_alloc;
if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
goto error_va_map;
/* Initialize it. */
pipe_reference_init(&bo->base.reference, 1);
bo->bo = buf_handle;
bo->base.alignment = 0;
bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
bo->base.size = size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
bo->rws = ws;
bo->user_ptr = pointer;
bo->va = va;
bo->va_handle = va_handle;
bo->initial_domain = RADEON_DOMAIN_GTT;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);
return (struct pb_buffer*)bo;
error_va_map:
amdgpu_va_range_free(va_handle);
error_va_alloc:
amdgpu_bo_free(buf_handle);
error:
FREE(bo);
return NULL;
}
static uint64_t amdgpu_bo_get_va(struct radeon_winsys_cs_handle *buf)
{
return ((struct amdgpu_winsys_bo*)buf)->va;
}
void amdgpu_bomgr_init_functions(struct amdgpu_winsys *ws)
{
ws->base.buffer_get_cs_handle = amdgpu_get_cs_handle;
ws->base.buffer_set_tiling = amdgpu_bo_set_tiling;
ws->base.buffer_get_tiling = amdgpu_bo_get_tiling;
ws->base.buffer_map = amdgpu_bo_map;
ws->base.buffer_unmap = amdgpu_bo_unmap;
ws->base.buffer_wait = amdgpu_bo_wait;
ws->base.buffer_create = amdgpu_bo_create;
ws->base.buffer_from_handle = amdgpu_bo_from_handle;
ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
ws->base.buffer_get_handle = amdgpu_bo_get_handle;
ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
}


@@ -0,0 +1,80 @@
/*
* Copyright © 2008 Jérôme Glisse
* Copyright © 2011 Marek Olšák <maraeo@gmail.com>
* Copyright © 2015 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Marek Olšák <maraeo@gmail.com>
*/
#ifndef AMDGPU_BO_H
#define AMDGPU_BO_H
#include "amdgpu_winsys.h"
#include "pipebuffer/pb_bufmgr.h"
struct amdgpu_bo_desc {
struct pb_desc base;
enum radeon_bo_domain initial_domain;
unsigned flags;
};
struct amdgpu_winsys_bo {
struct pb_buffer base;
struct amdgpu_winsys *rws;
void *user_ptr; /* from buffer_from_ptr */
amdgpu_bo_handle bo;
uint32_t unique_id;
amdgpu_va_handle va_handle;
uint64_t va;
enum radeon_bo_domain initial_domain;
/* how many command streams is this bo referenced in? */
int num_cs_references;
/* whether buffer_get_handle or buffer_from_handle has been called;
* it can only transition from false to true
*/
volatile int is_shared; /* bool (int for atomicity) */
/* Fences for buffer synchronization. */
struct pipe_fence_handle *fence[RING_LAST];
};
struct pb_manager *amdgpu_bomgr_create(struct amdgpu_winsys *rws);
void amdgpu_bomgr_init_functions(struct amdgpu_winsys *ws);
static inline
void amdgpu_winsys_bo_reference(struct amdgpu_winsys_bo **dst,
struct amdgpu_winsys_bo *src)
{
pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
}
#endif


@@ -0,0 +1,704 @@
/*
* Copyright © 2008 Jérôme Glisse
* Copyright © 2010 Marek Olšák <maraeo@gmail.com>
* Copyright © 2015 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Marek Olšák <maraeo@gmail.com>
*/
#include "amdgpu_cs.h"
#include "os/os_time.h"
#include <stdio.h>
#include <amdgpu_drm.h>
/* FENCES */
static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
unsigned ip_instance, unsigned ring)
{
struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
fence->reference.count = 1;
fence->ctx = ctx;
fence->fence.context = ctx->ctx;
fence->fence.ip_type = ip_type;
fence->fence.ip_instance = ip_instance;
fence->fence.ring = ring;
p_atomic_inc(&ctx->refcount);
return (struct pipe_fence_handle *)fence;
}
static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
struct amdgpu_cs_request* request,
uint64_t *user_fence_cpu_address)
{
struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
rfence->fence.fence = request->seq_no;
rfence->user_fence_cpu_address = user_fence_cpu_address;
}
static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
rfence->signalled = true;
}
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
bool absolute)
{
struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
uint32_t expired;
int64_t abs_timeout;
uint64_t *user_fence_cpu;
int r;
if (rfence->signalled)
return true;
if (absolute)
abs_timeout = timeout;
else
abs_timeout = os_time_get_absolute_timeout(timeout);
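/* Fast path: the kernel writes the submission's sequence number to the
 * user fence slot on completion (see amdgpu_fence_submitted), so a plain
 * CPU read can detect a signalled fence without a query ioctl. */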
user_fence_cpu = rfence->user_fence_cpu_address;
if (user_fence_cpu && *user_fence_cpu >= rfence->fence.fence) {
rfence->signalled = true;
return true;
}
/* Now use the libdrm query. */
r = amdgpu_cs_query_fence_status(&rfence->fence,
abs_timeout,
AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
&expired);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
return false;
}
if (expired) {
/* This variable can only transition from false to true, so it doesn't
* matter if threads race for it. */
rfence->signalled = true;
return true;
}
return false;
}
static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
struct pipe_fence_handle *fence,
uint64_t timeout)
{
return amdgpu_fence_wait(fence, timeout, false);
}
/* CONTEXTS */
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
int r;
struct amdgpu_bo_alloc_request alloc_buffer = {};
amdgpu_bo_handle buf_handle;
ctx->ws = amdgpu_winsys(ws);
ctx->refcount = 1;
r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
FREE(ctx);
return NULL;
}
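/* Allocate, map and clear the user fence BO: one page of GTT memory into
 * which the GPU writes fence sequence numbers (one 64-bit slot per ring
 * type, see amdgpu_cs_do_submission). */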
alloc_buffer.alloc_size = 4 * 1024;
alloc_buffer.phys_alignment = 4 * 1024;
alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
amdgpu_cs_ctx_free(ctx->ctx);
FREE(ctx);
return NULL;
}
r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
amdgpu_bo_free(buf_handle);
amdgpu_cs_ctx_free(ctx->ctx);
FREE(ctx);
return NULL;
}
memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
ctx->user_fence_bo = buf_handle;
return (struct radeon_winsys_ctx*)ctx;
}
static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
}
static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
uint32_t result, hangs;
int r;
r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
return PIPE_NO_RESET;
}
switch (result) {
case AMDGPU_CTX_GUILTY_RESET:
return PIPE_GUILTY_CONTEXT_RESET;
case AMDGPU_CTX_INNOCENT_RESET:
return PIPE_INNOCENT_CONTEXT_RESET;
case AMDGPU_CTX_UNKNOWN_RESET:
return PIPE_UNKNOWN_CONTEXT_RESET;
case AMDGPU_CTX_NO_RESET:
default:
return PIPE_NO_RESET;
}
}
/* COMMAND SUBMISSION */
static bool amdgpu_get_new_ib(struct amdgpu_cs *cs)
{
/* The maximum IB size is 4 MB - 1 B, which is unaligned.
* Use the aligned size of 4 MB minus 16 dwords (64 bytes). */
const unsigned max_ib_size = (1024 * 1024 - 16) * 4;
const unsigned min_ib_size = 24 * 1024 * 4;
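/* IBs are sub-allocated from one big GTT buffer; a new backing buffer is
 * created only when less than min_ib_size is left in the current one. */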
cs->base.cdw = 0;
cs->base.buf = NULL;
/* Allocate a new buffer for IBs if the current buffer is all used. */
if (!cs->big_ib_buffer ||
cs->used_ib_space + min_ib_size > cs->big_ib_buffer->size) {
struct radeon_winsys *ws = &cs->ctx->ws->base;
struct radeon_winsys_cs_handle *winsys_bo;
pb_reference(&cs->big_ib_buffer, NULL);
cs->big_ib_winsys_buffer = NULL;
cs->ib_mapped = NULL;
cs->used_ib_space = 0;
cs->big_ib_buffer = ws->buffer_create(ws, max_ib_size,
4096, true,
RADEON_DOMAIN_GTT,
RADEON_FLAG_CPU_ACCESS);
if (!cs->big_ib_buffer)
return false;
winsys_bo = ws->buffer_get_cs_handle(cs->big_ib_buffer);
cs->ib_mapped = ws->buffer_map(winsys_bo, NULL, PIPE_TRANSFER_WRITE);
if (!cs->ib_mapped) {
pb_reference(&cs->big_ib_buffer, NULL);
return false;
}
cs->big_ib_winsys_buffer = (struct amdgpu_winsys_bo*)winsys_bo;
}
cs->ib.ib_mc_address = cs->big_ib_winsys_buffer->va + cs->used_ib_space;
cs->base.buf = (uint32_t*)(cs->ib_mapped + cs->used_ib_space);
cs->base.max_dw = (cs->big_ib_buffer->size - cs->used_ib_space) / 4;
return true;
}
static boolean amdgpu_init_cs_context(struct amdgpu_cs *cs,
enum ring_type ring_type)
{
int i;
switch (ring_type) {
case RING_DMA:
cs->request.ip_type = AMDGPU_HW_IP_DMA;
break;
case RING_UVD:
cs->request.ip_type = AMDGPU_HW_IP_UVD;
break;
case RING_VCE:
cs->request.ip_type = AMDGPU_HW_IP_VCE;
break;
case RING_COMPUTE:
cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
break;
default:
case RING_GFX:
cs->request.ip_type = AMDGPU_HW_IP_GFX;
break;
}
cs->request.number_of_ibs = 1;
cs->request.ibs = &cs->ib;
cs->max_num_buffers = 512;
cs->buffers = (struct amdgpu_cs_buffer*)
CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
if (!cs->buffers) {
return FALSE;
}
cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
if (!cs->handles) {
FREE(cs->buffers);
return FALSE;
}
cs->flags = CALLOC(1, cs->max_num_buffers);
if (!cs->flags) {
FREE(cs->handles);
FREE(cs->buffers);
return FALSE;
}
for (i = 0; i < Elements(cs->buffer_indices_hashlist); i++) {
cs->buffer_indices_hashlist[i] = -1;
}
return TRUE;
}
static void amdgpu_cs_context_cleanup(struct amdgpu_cs *cs)
{
unsigned i;
for (i = 0; i < cs->num_buffers; i++) {
p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
cs->handles[i] = NULL;
cs->flags[i] = 0;
}
cs->num_buffers = 0;
cs->used_gart = 0;
cs->used_vram = 0;
for (i = 0; i < Elements(cs->buffer_indices_hashlist); i++) {
cs->buffer_indices_hashlist[i] = -1;
}
}
static void amdgpu_destroy_cs_context(struct amdgpu_cs *cs)
{
amdgpu_cs_context_cleanup(cs);
FREE(cs->flags);
FREE(cs->buffers);
FREE(cs->handles);
FREE(cs->request.dependencies);
}
static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
enum ring_type ring_type,
void (*flush)(void *ctx, unsigned flags,
struct pipe_fence_handle **fence),
void *flush_ctx,
struct radeon_winsys_cs_handle *trace_buf)
{
struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
struct amdgpu_cs *cs;
cs = CALLOC_STRUCT(amdgpu_cs);
if (!cs) {
return NULL;
}
cs->ctx = ctx;
cs->flush_cs = flush;
cs->flush_data = flush_ctx;
cs->base.ring_type = ring_type;
if (!amdgpu_init_cs_context(cs, ring_type)) {
FREE(cs);
return NULL;
}
if (!amdgpu_get_new_ib(cs)) {
amdgpu_destroy_cs_context(cs);
FREE(cs);
return NULL;
}
p_atomic_inc(&ctx->ws->num_cs);
return &cs->base;
}
#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
int amdgpu_get_reloc(struct amdgpu_cs *cs, struct amdgpu_winsys_bo *bo)
{
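/* buffer_indices_hashlist has a power-of-two size (512 entries), so masking
 * the BO's unique ID with size-1 picks a bucket directly. */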
unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
int i = cs->buffer_indices_hashlist[hash];
/* Either the bucket is empty (not found, i == -1) or it points at this BO (found). */
if (i == -1 || cs->buffers[i].bo == bo)
return i;
/* Hash collision, look for the BO in the list of relocs linearly. */
for (i = cs->num_buffers - 1; i >= 0; i--) {
if (cs->buffers[i].bo == bo) {
/* Put this reloc in the hash list.
* This will prevent additional hash collisions if there are
* several consecutive get_reloc calls for the same buffer.
*
* Example: Assuming buffers A,B,C collide in the hash list,
* the following sequence of relocs:
* AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
* will collide here: ^ and here: ^,
* meaning that we should get very few collisions in the end. */
cs->buffer_indices_hashlist[hash] = i;
return i;
}
}
return -1;
}
static unsigned amdgpu_add_reloc(struct amdgpu_cs *cs,
struct amdgpu_winsys_bo *bo,
enum radeon_bo_usage usage,
enum radeon_bo_domain domains,
unsigned priority,
enum radeon_bo_domain *added_domains)
{
struct amdgpu_cs_buffer *reloc;
unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
int i = -1;
priority = MIN2(priority, 15);
*added_domains = 0;
i = amdgpu_get_reloc(cs, bo);
if (i >= 0) {
reloc = &cs->buffers[i];
reloc->usage |= usage;
*added_domains = domains & ~reloc->domains;
reloc->domains |= domains;
cs->flags[i] = MAX2(cs->flags[i], priority);
return i;
}
/* New relocation, check if the backing array is large enough. */
if (cs->num_buffers >= cs->max_num_buffers) {
uint32_t size;
cs->max_num_buffers += 10;
size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
cs->buffers = realloc(cs->buffers, size);
size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
cs->handles = realloc(cs->handles, size);
cs->flags = realloc(cs->flags, cs->max_num_buffers);
}
/* Initialize the new relocation. */
cs->buffers[cs->num_buffers].bo = NULL;
amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
cs->handles[cs->num_buffers] = bo->bo;
cs->flags[cs->num_buffers] = priority;
p_atomic_inc(&bo->num_cs_references);
reloc = &cs->buffers[cs->num_buffers];
reloc->bo = bo;
reloc->usage = usage;
reloc->domains = domains;
cs->buffer_indices_hashlist[hash] = cs->num_buffers;
*added_domains = domains;
return cs->num_buffers++;
}
static unsigned amdgpu_cs_add_reloc(struct radeon_winsys_cs *rcs,
struct radeon_winsys_cs_handle *buf,
enum radeon_bo_usage usage,
enum radeon_bo_domain domains,
enum radeon_bo_priority priority)
{
/* Don't use the "domains" parameter. Amdgpu doesn't support changing
* the buffer placement during command submission.
*/
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
enum radeon_bo_domain added_domains;
unsigned index = amdgpu_add_reloc(cs, bo, usage, bo->initial_domain,
priority, &added_domains);
if (added_domains & RADEON_DOMAIN_GTT)
cs->used_gart += bo->base.size;
if (added_domains & RADEON_DOMAIN_VRAM)
cs->used_vram += bo->base.size;
return index;
}
static int amdgpu_cs_get_reloc(struct radeon_winsys_cs *rcs,
struct radeon_winsys_cs_handle *buf)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
return amdgpu_get_reloc(cs, (struct amdgpu_winsys_bo*)buf);
}
static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
return TRUE;
}
static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
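/* Heuristic: report "below limit" only while the CS would use less than
 * 70% of each heap, leaving headroom for other allocations. */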
boolean status =
(cs->used_gart + gtt) < cs->ctx->ws->info.gart_size * 0.7 &&
(cs->used_vram + vram) < cs->ctx->ws->info.vram_size * 0.7;
return status;
}
static void amdgpu_cs_do_submission(struct amdgpu_cs *cs,
struct pipe_fence_handle **out_fence)
{
struct amdgpu_winsys *ws = cs->ctx->ws;
struct pipe_fence_handle *fence;
int i, j, r;
/* Create a fence. */
fence = amdgpu_fence_create(cs->ctx,
cs->request.ip_type,
cs->request.ip_instance,
cs->request.ring);
if (out_fence)
amdgpu_fence_reference(out_fence, fence);
cs->request.number_of_dependencies = 0;
/* Since the kernel driver doesn't synchronize execution between different
* rings automatically, we have to add fence dependencies manually. */
pipe_mutex_lock(ws->bo_fence_lock);
for (i = 0; i < cs->num_buffers; i++) {
for (j = 0; j < RING_LAST; j++) {
struct amdgpu_cs_fence *dep;
unsigned idx;
struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j];
if (!bo_fence)
continue;
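/* No dependency is needed if the fence comes from the same context, IP and
 * ring (the kernel orders those submissions) or has already signalled. */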
if (bo_fence->ctx == cs->ctx &&
bo_fence->fence.ip_type == cs->request.ip_type &&
bo_fence->fence.ip_instance == cs->request.ip_instance &&
bo_fence->fence.ring == cs->request.ring)
continue;
if (amdgpu_fence_wait((void *)bo_fence, 0, false))
continue;
idx = cs->request.number_of_dependencies++;
if (idx >= cs->max_dependencies) {
unsigned size;
cs->max_dependencies = idx + 8;
size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
cs->request.dependencies = realloc(cs->request.dependencies, size);
}
dep = &cs->request.dependencies[idx];
memcpy(dep, &bo_fence->fence, sizeof(*dep));
}
}
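/* Set up the user fence: the kernel writes the sequence number into this
 * context's fence BO, one 64-bit slot per ring type (note the uint64_t
 * pointer arithmetic below). UVD and VCE don't use user fences here. */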
cs->request.fence_info.handle = NULL;
if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE) {
cs->request.fence_info.handle = cs->ctx->user_fence_bo;
cs->request.fence_info.offset = cs->base.ring_type;
}
r = amdgpu_cs_submit(cs->ctx->ctx, 0, &cs->request, 1);
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
else
fprintf(stderr, "amdgpu: The CS has been rejected, "
"see dmesg for more information.\n");
amdgpu_fence_signalled(fence);
} else {
/* Success. */
uint64_t *user_fence = NULL;
if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE)
user_fence = cs->ctx->user_fence_cpu_address_base +
cs->request.fence_info.offset;
amdgpu_fence_submitted(fence, &cs->request, user_fence);
for (i = 0; i < cs->num_buffers; i++)
amdgpu_fence_reference(&cs->buffers[i].bo->fence[cs->base.ring_type],
fence);
}
pipe_mutex_unlock(ws->bo_fence_lock);
amdgpu_fence_reference(&fence, NULL);
}
static void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
/* no-op */
}
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
unsigned flags,
struct pipe_fence_handle **fence,
uint32_t cs_trace_id)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys *ws = cs->ctx->ws;
switch (cs->base.ring_type) {
case RING_DMA:
/* pad DMA ring to 8 DWs */
if (ws->info.chip_class <= SI) {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0xf0000000); /* NOP packet */
} else {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0x00000000); /* NOP packet */
}
break;
case RING_GFX:
/* pad the GFX ring to 8 DWs to meet CP fetch alignment requirements;
* r6xx requires at least 4 DW alignment to avoid a hw bug.
*/
if (ws->info.chip_class <= SI) {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
} else {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
}
break;
case RING_UVD:
while (rcs->cdw & 15)
OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
break;
default:
break;
}
if (rcs->cdw > rcs->max_dw) {
fprintf(stderr, "amdgpu: command stream overflowed\n");
}
amdgpu_cs_add_reloc(rcs, (void*)cs->big_ib_winsys_buffer,
RADEON_USAGE_READ, 0, RADEON_PRIO_MIN);
/* Submit only if the CS is not empty, hasn't overflowed, and noop mode is off. */
if (cs->base.cdw && cs->base.cdw <= cs->base.max_dw && !debug_get_option_noop()) {
int r;
r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
cs->handles, cs->flags,
&cs->request.resources);
if (r) {
fprintf(stderr, "amdgpu: resource list creation failed (%d)\n", r);
cs->request.resources = NULL;
goto cleanup;
}
cs->ib.size = cs->base.cdw;
cs->used_ib_space += cs->base.cdw * 4;
amdgpu_cs_do_submission(cs, fence);
/* Cleanup. */
if (cs->request.resources)
amdgpu_bo_list_destroy(cs->request.resources);
}
cleanup:
amdgpu_cs_context_cleanup(cs);
amdgpu_get_new_ib(cs);
ws->num_cs_flushes++;
}
static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
amdgpu_destroy_cs_context(cs);
p_atomic_dec(&cs->ctx->ws->num_cs);
pb_reference(&cs->big_ib_buffer, NULL);
FREE(cs);
}
static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
struct radeon_winsys_cs_handle *_buf,
enum radeon_bo_usage usage)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;
return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}
void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
{
ws->base.ctx_create = amdgpu_ctx_create;
ws->base.ctx_destroy = amdgpu_ctx_destroy;
ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
ws->base.cs_create = amdgpu_cs_create;
ws->base.cs_destroy = amdgpu_cs_destroy;
ws->base.cs_add_reloc = amdgpu_cs_add_reloc;
ws->base.cs_get_reloc = amdgpu_cs_get_reloc;
ws->base.cs_validate = amdgpu_cs_validate;
ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
ws->base.cs_flush = amdgpu_cs_flush;
ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
ws->base.fence_reference = amdgpu_fence_reference;
}


@@ -0,0 +1,162 @@
/*
* Copyright © 2011 Marek Olšák <maraeo@gmail.com>
* Copyright © 2015 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Marek Olšák <maraeo@gmail.com>
*/
#ifndef AMDGPU_CS_H
#define AMDGPU_CS_H
#include "amdgpu_bo.h"
#include "util/u_memory.h"
struct amdgpu_ctx {
struct amdgpu_winsys *ws;
amdgpu_context_handle ctx;
amdgpu_bo_handle user_fence_bo;
uint64_t *user_fence_cpu_address_base;
int refcount;
};
struct amdgpu_cs_buffer {
struct amdgpu_winsys_bo *bo;
enum radeon_bo_usage usage;
enum radeon_bo_domain domains;
};
struct amdgpu_cs {
struct radeon_winsys_cs base;
struct amdgpu_ctx *ctx;
/* Flush CS. */
void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
void *flush_data;
/* A buffer out of which new IBs are allocated. */
struct pb_buffer *big_ib_buffer; /* for holding the reference */
struct amdgpu_winsys_bo *big_ib_winsys_buffer;
uint8_t *ib_mapped;
unsigned used_ib_space;
/* amdgpu_cs_submit parameters */
struct amdgpu_cs_request request;
struct amdgpu_cs_ib_info ib;
/* Relocs. */
unsigned max_num_buffers;
unsigned num_buffers;
amdgpu_bo_handle *handles;
uint8_t *flags;
struct amdgpu_cs_buffer *buffers;
int buffer_indices_hashlist[512];
unsigned used_vram;
unsigned used_gart;
unsigned max_dependencies;
};
struct amdgpu_fence {
struct pipe_reference reference;
struct amdgpu_ctx *ctx; /* submission context */
struct amdgpu_cs_fence fence;
uint64_t *user_fence_cpu_address;
volatile int signalled; /* bool (int for atomicity) */
};
static inline void amdgpu_ctx_unref(struct amdgpu_ctx *ctx)
{
if (p_atomic_dec_zero(&ctx->refcount)) {
amdgpu_cs_ctx_free(ctx->ctx);
amdgpu_bo_free(ctx->user_fence_bo);
FREE(ctx);
}
}
static inline void amdgpu_fence_reference(struct pipe_fence_handle **dst,
struct pipe_fence_handle *src)
{
struct amdgpu_fence **rdst = (struct amdgpu_fence **)dst;
struct amdgpu_fence *rsrc = (struct amdgpu_fence *)src;
if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
amdgpu_ctx_unref((*rdst)->ctx);
FREE(*rdst);
}
*rdst = rsrc;
}
int amdgpu_get_reloc(struct amdgpu_cs *csc, struct amdgpu_winsys_bo *bo);
static inline struct amdgpu_cs *
amdgpu_cs(struct radeon_winsys_cs *base)
{
return (struct amdgpu_cs*)base;
}
static inline boolean
amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
struct amdgpu_winsys_bo *bo)
{
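/* If the BO is referenced by as many CSes as currently exist, this CS must
 * be one of them; otherwise fall back to the reloc lookup. */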
int num_refs = bo->num_cs_references;
return num_refs == bo->rws->num_cs ||
(num_refs && amdgpu_get_reloc(cs, bo) != -1);
}
static inline boolean
amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
struct amdgpu_winsys_bo *bo,
enum radeon_bo_usage usage)
{
int index;
if (!bo->num_cs_references)
return FALSE;
index = amdgpu_get_reloc(cs, bo);
if (index == -1)
return FALSE;
return (cs->buffers[index].usage & usage) != 0;
}
static inline boolean
amdgpu_bo_is_referenced_by_any_cs(struct amdgpu_winsys_bo *bo)
{
return bo->num_cs_references != 0;
}
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
bool absolute);
void amdgpu_cs_init_functions(struct amdgpu_winsys *ws);
#endif


@@ -0,0 +1,40 @@
/*
* Copyright © 2015 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
#ifndef AMDGPU_PUBLIC_H
#define AMDGPU_PUBLIC_H
#include "pipe/p_defines.h"
struct radeon_winsys;
struct pipe_screen;
typedef struct pipe_screen *(*radeon_screen_create_t)(struct radeon_winsys *);
struct radeon_winsys *
amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create);
#endif


@@ -0,0 +1,448 @@
/*
* Copyright © 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
* Copyright © 2009 Joakim Sindholt <opensource@zhasha.com>
* Copyright © 2011 Marek Olšák <maraeo@gmail.com>
* Copyright © 2015 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Marek Olšák <maraeo@gmail.com>
*/
#include "amdgpu_cs.h"
#include "amdgpu_public.h"
#include "util/u_hash_table.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
#define CIK_TILE_MODE_COLOR_2D 14
#define CIK__GB_TILE_MODE__PIPE_CONFIG(x) (((x) >> 6) & 0x1f)
#define CIK__PIPE_CONFIG__ADDR_SURF_P2 0
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16 4
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16 5
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32 6
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32 7
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16 8
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16 9
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16 10
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16 11
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16 12
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32 13
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32 14
#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16 16
#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16 17
static struct util_hash_table *dev_tab = NULL;
pipe_static_mutex(dev_tab_mutex);
static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
{
   unsigned mode2d = info->gb_tile_mode[CIK_TILE_MODE_COLOR_2D];

   switch (CIK__GB_TILE_MODE__PIPE_CONFIG(mode2d)) {
   case CIK__PIPE_CONFIG__ADDR_SURF_P2:
   default:
      return 2;
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32:
      return 4;
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
      return 8;
   case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16:
      return 16;
   }
}
/* Convert the Sea Islands register values GB_ADDR_CFG and MC_ARB_RAMCFG
 * into the GB_TILING_CONFIG register format, which is only present on
 * R600-R700. */
static unsigned r600_get_gb_tiling_config(struct amdgpu_gpu_info *info)
{
   unsigned num_pipes = info->gb_addr_cfg & 0x7;
   unsigned num_banks = info->mc_arb_ramcfg & 0x3;
   unsigned pipe_interleave_bytes = (info->gb_addr_cfg >> 4) & 0x7;
   unsigned row_size = (info->gb_addr_cfg >> 28) & 0x3;

   return num_pipes | (num_banks << 4) |
          (pipe_interleave_bytes << 8) |
          (row_size << 12);
}
/* Perform the ioctls needed to query device information and fill in
 * ws->info. */
static boolean do_winsys_init(struct amdgpu_winsys *ws)
{
   struct amdgpu_buffer_size_alignments alignment_info = {};
   struct amdgpu_heap_info vram, gtt;
   struct drm_amdgpu_info_hw_ip dma = {}, uvd = {}, vce = {};
   uint32_t vce_version = 0, vce_feature = 0;
   int r;

   /* Query hardware and driver information. */
   r = amdgpu_query_gpu_info(ws->dev, &ws->amdinfo);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_gpu_info failed.\n");
      goto fail;
   }

   r = amdgpu_query_buffer_size_alignment(ws->dev, &alignment_info);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_buffer_size_alignment failed.\n");
      goto fail;
   }

   r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram) failed.\n");
      goto fail;
   }

   r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &gtt);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_heap_info(gtt) failed.\n");
      goto fail;
   }

   r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_DMA, 0, &dma);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(dma) failed.\n");
      goto fail;
   }

   r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_UVD, 0, &uvd);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd) failed.\n");
      goto fail;
   }

   r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_VCE, 0, &vce);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vce) failed.\n");
      goto fail;
   }

   r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_VCE, 0, 0,
                                     &vce_version, &vce_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(vce) failed.\n");
      goto fail;
   }
   /* Set chip identification. */
   ws->info.pci_id = ws->amdinfo.asic_id; /* TODO: is this correct? */

   switch (ws->info.pci_id) {
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; break;
#include "pci_ids/radeonsi_pci_ids.h"
#undef CHIPSET

   default:
      fprintf(stderr, "amdgpu: Invalid PCI ID.\n");
      goto fail;
   }

   if (ws->info.family >= CHIP_TONGA)
      ws->info.chip_class = VI;
   else if (ws->info.family >= CHIP_BONAIRE)
      ws->info.chip_class = CIK;
   else {
      fprintf(stderr, "amdgpu: Unknown family.\n");
      goto fail;
   }

   /* LLVM 3.6.1 is required for VI because of bug fixes there. */
   if (ws->info.chip_class >= VI &&
       (HAVE_LLVM < 0x0306 ||
        (HAVE_LLVM == 0x0306 && MESA_LLVM_VERSION_PATCH < 1))) {
      fprintf(stderr, "amdgpu: LLVM 3.6.1 is required, got LLVM %i.%i.%i\n",
              HAVE_LLVM >> 8, HAVE_LLVM & 255, MESA_LLVM_VERSION_PATCH);
      goto fail;
   }
   /* Set hardware information. */
   ws->info.gart_size = gtt.heap_size;
   ws->info.vram_size = vram.heap_size;
   /* Convert the engine clock from kHz to MHz. */
   ws->info.max_sclk = ws->amdinfo.max_engine_clk / 1000;
   ws->info.max_compute_units = 1; /* TODO */
   ws->info.max_se = ws->amdinfo.num_shader_engines;
   ws->info.max_sh_per_se = ws->amdinfo.num_shader_arrays_per_engine;
   ws->info.has_uvd = uvd.available_rings != 0;
   ws->info.vce_fw_version =
         vce.available_rings ? vce_version : 0;
   ws->info.has_userptr = TRUE;
   ws->info.r600_num_backends = ws->amdinfo.rb_pipes;
   ws->info.r600_clock_crystal_freq = ws->amdinfo.gpu_counter_freq;
   ws->info.r600_tiling_config = r600_get_gb_tiling_config(&ws->amdinfo);
   ws->info.r600_num_tile_pipes = cik_get_num_tile_pipes(&ws->amdinfo);
   ws->info.r600_max_pipes = ws->amdinfo.max_quad_shader_pipes; /* TODO: is this correct? */
   ws->info.r600_virtual_address = TRUE;
   ws->info.r600_has_dma = dma.available_rings != 0;

   memcpy(ws->info.si_tile_mode_array, ws->amdinfo.gb_tile_mode,
          sizeof(ws->amdinfo.gb_tile_mode));
   ws->info.si_tile_mode_array_valid = TRUE;
   ws->info.si_backend_enabled_mask = ws->amdinfo.enabled_rb_pipes_mask;

   memcpy(ws->info.cik_macrotile_mode_array, ws->amdinfo.gb_macro_tile_mode,
          sizeof(ws->amdinfo.gb_macro_tile_mode));
   ws->info.cik_macrotile_mode_array_valid = TRUE;

   ws->gart_page_size = alignment_info.size_remote;

   return TRUE;

fail:
   amdgpu_device_deinitialize(ws->dev);
   ws->dev = NULL;
   return FALSE;
}
static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;

   pipe_mutex_destroy(ws->bo_fence_lock);

   ws->cman->destroy(ws->cman);
   ws->kman->destroy(ws->kman);

   amdgpu_device_deinitialize(ws->dev);
   FREE(rws);
}

static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
                                     struct radeon_info *info)
{
   *info = ((struct amdgpu_winsys *)rws)->info;
}

static boolean amdgpu_cs_request_feature(struct radeon_winsys_cs *rcs,
                                         enum radeon_feature_id fid,
                                         boolean enable)
{
   return FALSE;
}
static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
   struct amdgpu_heap_info heap;
   uint64_t retval = 0;

   switch (value) {
   case RADEON_REQUESTED_VRAM_MEMORY:
      return ws->allocated_vram;
   case RADEON_REQUESTED_GTT_MEMORY:
      return ws->allocated_gtt;
   case RADEON_BUFFER_WAIT_TIME_NS:
      return ws->buffer_wait_time;
   case RADEON_TIMESTAMP:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_CS_FLUSHES:
      return ws->num_cs_flushes;
   case RADEON_NUM_BYTES_MOVED:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
   case RADEON_CURRENT_SCLK:
   case RADEON_CURRENT_MCLK:
      return 0;
   case RADEON_GPU_RESET_COUNTER:
      assert(0);
      return 0;
   }
   return 0;
}
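
/* Usage sketch (hypothetical caller, not part of this patch): drivers
 * reach these counters through the winsys vtable, e.g. for a memory-usage
 * HUD. "rws" is assumed to be a struct radeon_winsys * returned by
 * amdgpu_winsys_create():
 *
 *    uint64_t vram_in_use = rws->query_value(rws, RADEON_VRAM_USAGE);
 *    uint64_t gtt_in_use = rws->query_value(rws, RADEON_GTT_USAGE);
 */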
static void amdgpu_read_registers(struct radeon_winsys *rws,
                                  unsigned reg_offset,
                                  unsigned num_registers, uint32_t *out)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;

   /* reg_offset is in bytes, but libdrm's amdgpu_read_mm_registers()
    * takes a dword offset. */
   amdgpu_read_mm_registers(ws->dev, reg_offset / 4, num_registers,
                            0xffffffff, 0, out);
}
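
/* Usage sketch (hypothetical): a driver reads registers through the same
 * vtable. The 0x8010 byte offset is an assumption standing in for a real
 * register (GRBM_STATUS on this family), not something this patch defines:
 *
 *    uint32_t grbm_status;
 *    rws->read_registers(rws, 0x8010, 1, &grbm_status);
 */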
/* Hash an amdgpu_device_handle (a pointer). On 64-bit, fold the upper half
 * into the lower half so that all pointer bits contribute to the hash. */
static unsigned hash_dev(void *key)
{
#if defined(PIPE_ARCH_X86_64)
   return pointer_to_intptr(key) ^ (pointer_to_intptr(key) >> 32);
#else
   return pointer_to_intptr(key);
#endif
}

static int compare_dev(void *key1, void *key2)
{
   /* Return 0 when equal, as util_hash_table expects. */
   return key1 != key2;
}
static bool amdgpu_winsys_unref(struct radeon_winsys *ws)
{
   struct amdgpu_winsys *rws = (struct amdgpu_winsys*)ws;
   bool destroy;

   /* When the reference counter drops to zero, remove the device pointer
    * from the table.
    * This must happen while the mutex is locked, so that
    * amdgpu_winsys_create in another thread doesn't get the winsys
    * from the table when the counter drops to 0. */
   pipe_mutex_lock(dev_tab_mutex);

   destroy = pipe_reference(&rws->reference, NULL);
   if (destroy && dev_tab)
      util_hash_table_remove(dev_tab, rws->dev);

   pipe_mutex_unlock(dev_tab_mutex);
   return destroy;
}
PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
{
   struct amdgpu_winsys *ws;
   drmVersionPtr version = drmGetVersion(fd);
   amdgpu_device_handle dev;
   uint32_t drm_major, drm_minor, r;

   /* The DRM driver version of amdgpu is 3.x.x. */
   if (!version || version->version_major != 3) {
      drmFreeVersion(version);
      return NULL;
   }
   drmFreeVersion(version);

   /* Look up the winsys from the dev table. */
   pipe_mutex_lock(dev_tab_mutex);
   if (!dev_tab)
      dev_tab = util_hash_table_create(hash_dev, compare_dev);

   /* Initialize the amdgpu device. This should always return the same pointer
    * for the same fd. */
   r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
   if (r) {
      pipe_mutex_unlock(dev_tab_mutex);
      fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
      return NULL;
   }
   /* Look up the winsys if we have already created one for this device. */
   ws = util_hash_table_get(dev_tab, dev);
   if (ws) {
      pipe_reference(NULL, &ws->reference);
      pipe_mutex_unlock(dev_tab_mutex);
      return &ws->base;
   }

   /* Create a new winsys. */
   ws = CALLOC_STRUCT(amdgpu_winsys);
   if (!ws) {
      pipe_mutex_unlock(dev_tab_mutex);
      return NULL;
   }

   ws->dev = dev;
   ws->info.drm_major = drm_major;
   ws->info.drm_minor = drm_minor;

   if (!do_winsys_init(ws))
      goto fail;

   /* Create managers: the kernel BO manager and, on top of it, a cache
    * with a 0.5 s timeout, a 2x reuse size tolerance, and a size limit of
    * 1/8th of the total VRAM+GTT size. */
   ws->kman = amdgpu_bomgr_create(ws);
   if (!ws->kman)
      goto fail;

   ws->cman = pb_cache_manager_create(ws->kman, 500000, 2.0f, 0,
                       (ws->info.vram_size + ws->info.gart_size) / 8);
   if (!ws->cman)
      goto fail;

   /* init reference */
   pipe_reference_init(&ws->reference, 1);
   /* Set functions. */
   ws->base.unref = amdgpu_winsys_unref;
   ws->base.destroy = amdgpu_winsys_destroy;
   ws->base.query_info = amdgpu_winsys_query_info;
   ws->base.cs_request_feature = amdgpu_cs_request_feature;
   ws->base.query_value = amdgpu_query_value;
   ws->base.read_registers = amdgpu_read_registers;

   amdgpu_bomgr_init_functions(ws);
   amdgpu_cs_init_functions(ws);

   pipe_mutex_init(ws->bo_fence_lock);

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   ws->base.screen = screen_create(&ws->base);
   if (!ws->base.screen) {
      amdgpu_winsys_destroy(&ws->base);
      pipe_mutex_unlock(dev_tab_mutex);
      return NULL;
   }

   util_hash_table_set(dev_tab, dev, ws);

   /* We must unlock the mutex only after the winsys is fully initialized,
    * so that other threads attempting to create the winsys from the same
    * fd get a fully initialized winsys and not one that is only half-way
    * there. */
   pipe_mutex_unlock(dev_tab_mutex);

   return &ws->base;

fail:
   pipe_mutex_unlock(dev_tab_mutex);
   if (ws->cman)
      ws->cman->destroy(ws->cman);
   if (ws->kman)
      ws->kman->destroy(ws->kman);
   FREE(ws);
   return NULL;
}

View File

@ -0,0 +1,71 @@
/*
* Copyright © 2009 Corbin Simpson
* Copyright © 2015 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Marek Olšák <maraeo@gmail.com>
*/
#ifndef AMDGPU_WINSYS_H
#define AMDGPU_WINSYS_H

#include "gallium/drivers/radeon/radeon_winsys.h"
#include "os/os_thread.h"
#include <amdgpu.h>

struct amdgpu_cs;

struct amdgpu_winsys {
   struct radeon_winsys base;
   struct pipe_reference reference;

   amdgpu_device_handle dev;

   pipe_mutex bo_fence_lock;

   int num_cs; /* The number of command streams created. */
   uint32_t next_bo_unique_id;
   uint64_t allocated_vram;
   uint64_t allocated_gtt;
   uint64_t buffer_wait_time; /* time spent in buffer_wait in ns */
   uint64_t num_cs_flushes;
   unsigned gart_page_size;

   struct radeon_info info;

   struct pb_manager *kman;
   struct pb_manager *cman;

   struct amdgpu_gpu_info amdinfo;
};

static inline struct amdgpu_winsys *
amdgpu_winsys(struct radeon_winsys *base)
{
   return (struct amdgpu_winsys*)base;
}

#endif
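
Usage sketch (hypothetical, not part of this patch): winsys callbacks receive the base pointer and downcast it with the helper above, for example to bump one of the counters in the struct:

   static void count_flush(struct radeon_winsys *base)
   {
      struct amdgpu_winsys *ws = amdgpu_winsys(base);
      ws->num_cs_flushes++;
   }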

View File

@ -550,6 +550,7 @@ static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
    default:
    case RING_GFX:
    case RING_COMPUTE:
        cs->cst->flags[0] = 0;
        cs->cst->flags[1] = RADEON_CS_RING_GFX;
        cs->cst->cs.num_chunks = 2;
@ -565,7 +566,7 @@ static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
        cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
        cs->cst->cs.num_chunks = 3;
    }
-   if (flags & RADEON_FLUSH_COMPUTE) {
+   if (cs->base.ring_type == RING_COMPUTE) {
        cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
        cs->cst->cs.num_chunks = 3;
    }