/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
|
|
|
|
|
|
|
|
#ifndef AMDGPU_CS_H
|
|
|
|
#define AMDGPU_CS_H
|
|
|
|
|
|
|
|
#include "amdgpu_bo.h"
|
|
|
|
#include "util/u_memory.h"
|
2020-04-25 19:03:15 +01:00
|
|
|
#include "drm-uapi/amdgpu_drm.h"
|
2015-04-16 21:43:23 +01:00
|
|
|
|
|
|
|
/* A submission context: wraps the kernel amdgpu context handle together with
 * the user-fence BO used to read back completed fence values from the CPU.
 * Reference-counted; the last reference is released by amdgpu_ctx_unref(),
 * which frees ctx, user_fence_bo and the struct itself. */
struct amdgpu_ctx {
   struct amdgpu_winsys *ws;        /* owning winsys */
   amdgpu_context_handle ctx;       /* libdrm_amdgpu context handle */
   amdgpu_bo_handle user_fence_bo;  /* BO backing the user fence values */
   uint64_t *user_fence_cpu_address_base; /* presumably the CPU mapping of
                                           * user_fence_bo — confirm in the .c */
   int refcount;                    /* updated atomically (p_atomic_dec_zero) */

   /* NOTE(review): these appear to count rejected command submissions so a
    * "broken" context can be detected — confirm semantics in amdgpu_cs.c. */
   unsigned initial_num_total_rejected_cs;
   unsigned num_rejected_cs;
};
|
|
|
|
|
|
|
|
/* One entry in a CS buffer list: a referenced BO plus how it is used. */
struct amdgpu_cs_buffer {
   struct amdgpu_winsys_bo *bo;
   union {
      struct {
         /* NOTE(review): per-buffer priority bits accumulated while adding
          * the BO — valid for real (non-slab) buffers; confirm in the .c. */
         uint32_t priority_usage;
      } real;
      struct {
         uint32_t real_idx; /* index of underlying real BO */
      } slab;
   } u;
   enum radeon_bo_usage usage; /* accumulated read/write usage flags */
};
|
|
|
|
|
2016-05-07 03:16:05 +01:00
|
|
|
/* Index of each IB kind within one command submission; used to index
 * amdgpu_cs_context::ib[]. */
enum ib_type {
   IB_PREAMBLE,
   IB_MAIN,
   IB_PARALLEL_COMPUTE,
   IB_NUM, /* number of IB types — array-size sentinel, not a real IB */
};
|
|
|
|
|
2015-08-08 12:27:38 +01:00
|
|
|
/* State for building one indirect buffer (IB). New IBs are sub-allocated
 * out of big_ib_buffer rather than allocated individually. */
struct amdgpu_ib {
   struct radeon_cmdbuf *rcs; /* pointer to the driver-owned data */

   /* A buffer out of which new IBs are allocated. */
   struct pb_buffer *big_ib_buffer;
   uint8_t *ib_mapped;     /* CPU mapping of big_ib_buffer */
   unsigned used_ib_space; /* bytes of big_ib_buffer already consumed */

   /* The maximum seen size from cs_check_space. If the driver does
    * cs_check_space and flush, the newly allocated IB should have at least
    * this size.
    */
   unsigned max_check_space_size;

   unsigned max_ib_size;
   /* NOTE(review): location where the IB size is patched at flush time —
    * confirm whether units are dwords against the .c file. */
   uint32_t *ptr_ib_size;
   bool ptr_ib_size_inside_ib; /* true if ptr_ib_size points into the IB itself */
   enum ib_type ib_type;
};
|
|
|
|
|
2019-02-04 19:55:03 +00:00
|
|
|
/* A growable array of fence handles. */
struct amdgpu_fence_list {
   struct pipe_fence_handle **list;
   unsigned num; /* elements currently in use */
   unsigned max; /* allocated capacity of list */
};
|
|
|
|
|
2016-03-08 00:19:31 +00:00
|
|
|
/* Everything needed to build and submit one command submission. Two of
 * these are double-buffered inside struct amdgpu_cs (csc1/csc2) so one can
 * be filled while the other is consumed by the kernel submission thread. */
struct amdgpu_cs_context {
   struct drm_amdgpu_cs_chunk_ib ib[IB_NUM]; /* one IB chunk per ib_type */

   /* Buffers. */
   unsigned max_real_buffers;
   unsigned num_real_buffers;
   struct amdgpu_cs_buffer *real_buffers;

   unsigned num_slab_buffers;
   unsigned max_slab_buffers;
   struct amdgpu_cs_buffer *slab_buffers;

   unsigned num_sparse_buffers;
   unsigned max_sparse_buffers;
   struct amdgpu_cs_buffer *sparse_buffers;

   /* Maps a BO hash to its index in the buffer lists; points at
    * amdgpu_cs::buffer_indices_hashlist for the active csc. */
   int *buffer_indices_hashlist;

   /* NOTE(review): cache of the most recently added BO, presumably a fast
    * path for repeated add/lookup of the same buffer — confirm in the .c. */
   struct amdgpu_winsys_bo *last_added_bo;
   unsigned last_added_bo_index;
   unsigned last_added_bo_usage;
   uint32_t last_added_bo_priority_usage;

   struct amdgpu_fence_list fence_dependencies;
   struct amdgpu_fence_list syncobj_dependencies;
   struct amdgpu_fence_list syncobj_to_signal;

   /* The compute IB uses the dependencies above + these: */
   struct amdgpu_fence_list compute_fence_dependencies;
   struct amdgpu_fence_list compute_start_fence_dependencies;

   struct pipe_fence_handle *fence; /* fence of this submission */

   /* the error returned from cs_flush for non-async submissions */
   int error_code;

   /* TMZ: will this command be submitted using the TMZ flag */
   bool secure;
};
|
|
|
|
|
2021-05-26 11:24:31 +01:00
|
|
|
#define BUFFER_HASHLIST_SIZE 4096
|
|
|
|
|
2016-03-08 00:19:31 +00:00
|
|
|
/* The winsys command-stream object handed to the driver (via main.rcs). */
struct amdgpu_cs {
   struct amdgpu_ib main; /* must be first because this is inherited */
   struct amdgpu_ib compute_ib; /* optional parallel compute IB */
   struct amdgpu_winsys *ws;
   struct amdgpu_ctx *ctx; /* submission context (referenced) */
   enum ring_type ring_type;
   struct drm_amdgpu_cs_chunk_fence fence_chunk;

   /* We flip between these two CS. While one is being consumed
    * by the kernel in another thread, the other one is being filled
    * by the pipe driver. */
   struct amdgpu_cs_context csc1;
   struct amdgpu_cs_context csc2;
   /* The currently-used CS. */
   struct amdgpu_cs_context *csc;
   /* The CS being currently-owned by the other thread. */
   struct amdgpu_cs_context *cst;
   /* This is only used by csc, not cst */
   int buffer_indices_hashlist[BUFFER_HASHLIST_SIZE];

   /* Flush CS. */
   void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
   void *flush_data; /* opaque argument passed back as flush_cs's ctx */
   bool stop_exec_on_failure;
   /* NOTE(review): presumably drops real submission work when set (no-op
    * CS) — confirm against amdgpu_cs.c. */
   bool noop;
   bool has_chaining; /* whether IB chaining is available/used */

   struct util_queue_fence flush_completed; /* signalled when the async flush is done */
   struct pipe_fence_handle *next_fence; /* deferred fence for the next flush */
   struct pb_buffer *preamble_ib_bo;
};
|
|
|
|
|
|
|
|
/* A GPU completion fence. Comes in two flavors: context-based (ctx != NULL,
 * uses amdgpu_cs_fence + the user fence value) or syncobj-based (ctx == NULL,
 * owns a kernel syncobj handle). See amdgpu_fence_is_syncobj(). */
struct amdgpu_fence {
   struct pipe_reference reference; /* refcount; managed by amdgpu_fence_reference */
   /* If ctx == NULL, this fence is syncobj-based. */
   uint32_t syncobj;

   struct amdgpu_winsys *ws;
   struct amdgpu_ctx *ctx; /* submission context */
   struct amdgpu_cs_fence fence;
   uint64_t *user_fence_cpu_address;

   /* If the fence has been submitted. This is unsignalled for deferred fences
    * (cs->next_fence) and while an IB is still being submitted in the submit
    * thread. */
   struct util_queue_fence submitted;

   volatile int signalled; /* bool (int for atomicity) */
};
|
|
|
|
|
2017-09-12 19:13:06 +01:00
|
|
|
static inline bool amdgpu_fence_is_syncobj(struct amdgpu_fence *fence)
|
|
|
|
{
|
|
|
|
return fence->ctx == NULL;
|
|
|
|
}
|
|
|
|
|
2015-04-16 21:43:23 +01:00
|
|
|
static inline void amdgpu_ctx_unref(struct amdgpu_ctx *ctx)
|
|
|
|
{
|
|
|
|
if (p_atomic_dec_zero(&ctx->refcount)) {
|
|
|
|
amdgpu_cs_ctx_free(ctx->ctx);
|
|
|
|
amdgpu_bo_free(ctx->user_fence_bo);
|
|
|
|
FREE(ctx);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Make *dst point at src with correct reference counting: src (if non-NULL)
 * gains a reference, the old *dst loses one and is destroyed if that was
 * the last reference. */
static inline void amdgpu_fence_reference(struct pipe_fence_handle **dst,
                                          struct pipe_fence_handle *src)
{
   struct amdgpu_fence **adst = (struct amdgpu_fence **)dst;
   struct amdgpu_fence *asrc = (struct amdgpu_fence *)src;

   /* pipe_reference returns true when the old object's refcount dropped to
    * zero and it must be destroyed. */
   if (pipe_reference(&(*adst)->reference, &asrc->reference)) {
      struct amdgpu_fence *fence = *adst;

      /* Syncobj-based fences (ctx == NULL) own a kernel syncobj handle;
       * context-based fences hold a reference on their submission context. */
      if (amdgpu_fence_is_syncobj(fence))
         amdgpu_cs_destroy_syncobj(fence->ws->dev, fence->syncobj);
      else
         amdgpu_ctx_unref(fence->ctx);

      util_queue_fence_destroy(&fence->submitted);
      FREE(fence);
   }
   /* Install the new fence last, after the old one has been released. */
   *adst = asrc;
}
|
|
|
|
|
2021-02-02 01:24:07 +00:00
|
|
|
int amdgpu_lookup_buffer_any_type(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo);
|
2015-04-16 21:43:23 +01:00
|
|
|
|
|
|
|
static inline struct amdgpu_cs *
|
2020-11-29 09:09:02 +00:00
|
|
|
amdgpu_cs(struct radeon_cmdbuf *rcs)
|
2015-04-16 21:43:23 +01:00
|
|
|
{
|
2020-11-29 09:09:02 +00:00
|
|
|
struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs->priv;
|
|
|
|
assert(!cs || cs->main.ib_type == IB_MAIN);
|
|
|
|
return cs;
|
2015-04-16 21:43:23 +01:00
|
|
|
}
|
|
|
|
|
2016-05-07 03:16:05 +01:00
|
|
|
/* Recover a pointer to the containing struct from a pointer to one of its
 * members — same idea as the Linux kernel's container_of().
 *
 * The whole expansion is wrapped in parentheses so the result composes with
 * postfix operators, e.g. get_container(p, T, m)->field; without the outer
 * parens the trailing cast would bind after '->' and fail to compile. */
#define get_container(member_ptr, container_type, container_member) \
   ((container_type *)((char *)(member_ptr) - offsetof(container_type, container_member)))
|
|
|
|
|
2016-06-21 20:29:39 +01:00
|
|
|
static inline bool
|
2015-04-16 21:43:23 +01:00
|
|
|
amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
|
|
|
|
struct amdgpu_winsys_bo *bo)
|
|
|
|
{
|
2021-02-03 03:37:28 +00:00
|
|
|
return amdgpu_lookup_buffer_any_type(cs->csc, bo) != -1;
|
2015-04-16 21:43:23 +01:00
|
|
|
}
|
|
|
|
|
2016-06-21 20:29:39 +01:00
|
|
|
static inline bool
|
2015-04-16 21:43:23 +01:00
|
|
|
amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
|
|
|
|
struct amdgpu_winsys_bo *bo,
|
|
|
|
enum radeon_bo_usage usage)
|
|
|
|
{
|
|
|
|
int index;
|
2016-09-08 09:05:55 +01:00
|
|
|
struct amdgpu_cs_buffer *buffer;
|
2015-04-16 21:43:23 +01:00
|
|
|
|
2021-02-02 01:24:07 +00:00
|
|
|
index = amdgpu_lookup_buffer_any_type(cs->csc, bo);
|
2015-04-16 21:43:23 +01:00
|
|
|
if (index == -1)
|
2016-06-21 20:29:39 +01:00
|
|
|
return false;
|
2015-04-16 21:43:23 +01:00
|
|
|
|
2017-02-07 16:11:00 +00:00
|
|
|
buffer = bo->bo ? &cs->csc->real_buffers[index] :
|
2020-11-12 19:25:43 +00:00
|
|
|
bo->base.usage & RADEON_FLAG_SPARSE ? &cs->csc->sparse_buffers[index] :
|
2017-02-07 16:11:00 +00:00
|
|
|
&cs->csc->slab_buffers[index];
|
2016-09-08 09:05:55 +01:00
|
|
|
|
|
|
|
return (buffer->usage & usage) != 0;
|
2015-04-16 21:43:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
|
|
|
|
bool absolute);
|
2017-02-07 16:53:49 +00:00
|
|
|
void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
|
|
|
|
unsigned num_fences,
|
|
|
|
struct pipe_fence_handle **fences);
|
2018-06-19 02:07:10 +01:00
|
|
|
void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs);
|
2019-06-28 15:06:23 +01:00
|
|
|
void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws);
|
2015-04-16 21:43:23 +01:00
|
|
|
|
|
|
|
#endif
|