Compare commits


5 Commits

Author SHA1 Message Date
Hans-Kristian Arntzen f74705b11a vkd3d: Flush copy queue in GetCudaSurface.
Signed-off-by: Hans-Kristian Arntzen <post@arntzen-software.no>
2022-07-07 15:20:05 +02:00
Hans-Kristian Arntzen ad15a7eb01 vkd3d: MEGAHACK: Experiment with deferred descriptor copies.
Attempts to move overhead from render threads to submission threads
which are twiddling thumbs most of the time.

Signed-off-by: Hans-Kristian Arntzen <post@arntzen-software.no>
2022-07-07 14:59:51 +02:00
Hans-Kristian Arntzen 02d9b6c61c profiler: Add --delta to profile helper tool.
Signed-off-by: Hans-Kristian Arntzen <post@arntzen-software.no>
2022-07-06 12:44:58 +02:00
Hans-Kristian Arntzen b6aac42aa6 profiler: Use rdtsc instead of QPC.
Runs much faster and we don't really need accurate ns readings.

Signed-off-by: Hans-Kristian Arntzen <post@arntzen-software.no>
2022-07-06 12:44:58 +02:00
Hans-Kristian Arntzen be3b44b01c common: Add rdtsc helper.
Signed-off-by: Hans-Kristian Arntzen <post@arntzen-software.no>
2022-07-06 12:44:58 +02:00
44 changed files with 1010 additions and 6252 deletions
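A note on the headline change (commit ad15a7eb01): in the CreateConstantBufferView / CreateShaderResourceView / CreateUnorderedAccessView and CopyDescriptorsSimple hunks further down, a descriptor handle carrying VKD3D_RESOURCE_DESC_DEFER_COPY_MASK is not written in place; the render thread writes into a scratch slot from the descriptor update ring and queues a (src, dst) copy that the submission thread replays later. The following is a toy, single-threaded model of that ring to make the flow concrete; all names and sizes are illustrative assumptions, not vkd3d-proton's actual code, and the real ring additionally has to synchronize between render and submission threads.

    /* Toy model of the deferred descriptor-copy ring: render threads record
     * (src, dst) copy requests instead of copying immediately; the submission
     * thread drains them later. Illustrative only, and single-threaded. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define RING_SIZE 64u   /* illustrative; must be a power of two */

    struct copy_request { uint32_t src, dst; };

    struct descriptor_update_ring
    {
        struct copy_request requests[RING_SIZE];
        uint32_t head, tail;
    };

    static uint32_t heap[256];   /* stand-in for a descriptor heap */

    /* Render-thread side: record the copy instead of performing it. */
    static void push_copy(struct descriptor_update_ring *ring, uint32_t src, uint32_t dst)
    {
        ring->requests[ring->tail++ & (RING_SIZE - 1u)] = (struct copy_request){ src, dst };
    }

    /* Submission-thread side: replay all pending copies before submitting. */
    static void flush_copies(struct descriptor_update_ring *ring)
    {
        while (ring->head != ring->tail)
        {
            struct copy_request req = ring->requests[ring->head++ & (RING_SIZE - 1u)];
            heap[req.dst] = heap[req.src];
        }
    }

    int main(void)
    {
        struct descriptor_update_ring ring;
        memset(&ring, 0, sizeof(ring));

        heap[3] = 0xca70u;         /* "create" the descriptor in a scratch slot */
        push_copy(&ring, 3, 100);  /* defer the copy into the real heap slot */
        flush_copies(&ring);       /* submission thread catches up later */
        printf("heap[100] = %#x\n", (unsigned)heap[100]);
        return 0;
    }

This is why the commit message talks about moving overhead: the expensive heap writes migrate to submission threads that are otherwise mostly idle.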
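Likewise for the profiler commits (b6aac42aa6, be3b44b01c): a raw TSC read replaces QueryPerformanceCounter because the profiler only needs cheap relative deltas, not calibrated nanosecond readings. A minimal sketch of such a helper, assuming x86 and a GCC/Clang or MSVC toolchain; the function name is hypothetical and the actual vkd3d-proton helper may differ.

    #include <stdint.h>
    #ifdef _MSC_VER
    #include <intrin.h>       /* __rdtsc() on MSVC */
    #else
    #include <x86intrin.h>    /* __rdtsc() on GCC/Clang */
    #endif

    /* Raw timestamp-counter read: far cheaper than QueryPerformanceCounter,
     * but the unit is CPU ticks rather than nanoseconds, which is fine for
     * relative profiling deltas. */
    static inline uint64_t profiler_get_ticks(void)
    {
        return __rdtsc();
    }

On most modern x86 CPUs the TSC is invariant (it ticks at a constant rate regardless of frequency scaling), which is what makes uncalibrated tick deltas meaningful.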

View File

@@ -15,10 +15,11 @@ jobs:
- name: Build release
id: build-release
uses: Joshua-Ashton/arch-mingw-github-action@v8
uses: Joshua-Ashton/arch-mingw-github-action@v7
with:
command: |
export VERSION_NAME="${GITHUB_REF##*/}-${GITHUB_SHA##*/}"
git config --global --add safe.directory "$GITHUB_WORKSPACE"
./package-release.sh ${VERSION_NAME} build --no-package
echo "VERSION_NAME=${VERSION_NAME}" >> $GITHUB_ENV

View File

@@ -18,58 +18,64 @@ jobs:
- name: Build MinGW x86
id: build-mingw-x86
uses: Joshua-Ashton/arch-mingw-github-action@v8
uses: Joshua-Ashton/arch-mingw-github-action@v7
with:
command: |
git config --global --add safe.directory "$GITHUB_WORKSPACE"
meson -Denable_tests=True -Denable_extras=True --cross-file=build-win32.txt --buildtype release build-mingw-x86
ninja -C build-mingw-x86
- name: Build MinGW x64
id: build-mingw-x64
uses: Joshua-Ashton/arch-mingw-github-action@v8
uses: Joshua-Ashton/arch-mingw-github-action@v7
with:
command: |
git config --global --add safe.directory "$GITHUB_WORKSPACE"
meson -Denable_tests=True -Denable_extras=True --cross-file=build-win64.txt --buildtype release build-mingw-x64
ninja -C build-mingw-x64
- name: Build Native GCC x86
id: build-native-gcc-x86
uses: Joshua-Ashton/arch-mingw-github-action@v8
uses: Joshua-Ashton/arch-mingw-github-action@v7
with:
command: |
export CC="gcc -m32"
export CXX="g++ -m32"
export PKG_CONFIG_PATH="/usr/lib32/pkgconfig:/usr/lib/i386-linux-gnu/pkgconfig:/usr/lib/pkgconfig"
git config --global --add safe.directory "$GITHUB_WORKSPACE"
meson -Denable_tests=True -Denable_extras=True --buildtype release build-native-gcc-x86
ninja -C build-native-gcc-x86
- name: Build Native GCC x64
id: build-native-gcc-x64
uses: Joshua-Ashton/arch-mingw-github-action@v8
uses: Joshua-Ashton/arch-mingw-github-action@v7
with:
command: |
export CC="gcc"
export CXX="g++"
git config --global --add safe.directory "$GITHUB_WORKSPACE"
meson -Denable_tests=True -Denable_extras=True --buildtype release build-native-gcc-x64
ninja -C build-native-gcc-x64
- name: Build Native Clang x86
id: build-native-clang-x86
uses: Joshua-Ashton/arch-mingw-github-action@v8
uses: Joshua-Ashton/arch-mingw-github-action@v7
with:
command: |
export CC="clang -m32"
export CXX="clang++ -m32"
export PKG_CONFIG_PATH="/usr/lib32/pkgconfig:/usr/lib/i386-linux-gnu/pkgconfig:/usr/lib/pkgconfig"
git config --global --add safe.directory "$GITHUB_WORKSPACE"
meson -Denable_tests=True -Denable_extras=True --buildtype release build-native-clang-x86
ninja -C build-native-clang-x86
- name: Build Native Clang x64
id: build-native-clang-x64
uses: Joshua-Ashton/arch-mingw-github-action@v8
uses: Joshua-Ashton/arch-mingw-github-action@v7
with:
command: |
export CC="clang"
export CXX="clang++"
git config --global --add safe.directory "$GITHUB_WORKSPACE"
meson -Denable_tests=True -Denable_extras=True --buildtype release build-native-clang-x64
ninja -C build-native-clang-x64

View File

@@ -30,7 +30,6 @@ WCHAR *vkd3d_dup_demangled_entry_point(const char *str);
char *vkd3d_dup_demangled_entry_point_ascii(const char *str);
bool vkd3d_export_strequal(const WCHAR *a, const WCHAR *b);
bool vkd3d_export_strequal_mixed(const WCHAR *a, const char *b);
bool vkd3d_export_strequal_substr(const WCHAR *a, size_t n, const WCHAR *b);
char *vkd3d_strdup(const char *str);

View File

@@ -6,12 +6,6 @@ COMP_SOURCES := $(wildcard $(M)/*.comp)
TESC_SOURCES := $(wildcard $(M)/*.tesc)
TESE_SOURCES := $(wildcard $(M)/*.tese)
GEOM_SOURCES := $(wildcard $(M)/*.geom)
RGEN_SOURCES := $(wildcard $(M)/*.rgen)
RINT_SOURCES := $(wildcard $(M)/*.rint)
RAHIT_SOURCES := $(wildcard $(M)/*.rahit)
RCHIT_SOURCES := $(wildcard $(M)/*.rchit)
RMISS_SOURCES := $(wildcard $(M)/*.rmiss)
RCALL_SOURCES := $(wildcard $(M)/*.rcall)
SPV_OBJECTS := \
$(VERT_SOURCES:.vert=.spv) \
@@ -19,49 +13,25 @@ SPV_OBJECTS := \
$(COMP_SOURCES:.comp=.spv) \
$(TESC_SOURCES:.tesc=.spv) \
$(TESE_SOURCES:.tese=.spv) \
$(GEOM_SOURCES:.geom=.spv) \
$(RGEN_SOURCES:.rgen=.spv) \
$(RINT_SOURCES:.rint=.spv) \
$(RAHIT_SOURCES:.rahit=.spv) \
$(RCHIT_SOURCES:.rchit=.spv) \
$(RMISS_SOURCES:.rmiss=.spv) \
$(RCALL_SOURCES:.rcall=.spv)
$(GEOM_SOURCES:.geom=.spv)
%.spv: %.vert
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 $(GLSLC_FLAGS)
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1
%.spv: %.frag
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 -DDEBUG_CHANNEL_HELPER_LANES $(GLSLC_FLAGS)
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 -DDEBUG_CHANNEL_HELPER_LANES
%.spv: %.comp
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 $(GLSLC_FLAGS)
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1
%.spv: %.geom
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 $(GLSLC_FLAGS)
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1
%.spv: %.tesc
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 $(GLSLC_FLAGS)
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1
%.spv: %.tese
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 $(GLSLC_FLAGS)
%.spv: %.rgen
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 --target-spv=spv1.4 $(GLSLC_FLAGS)
%.spv: %.rint
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 --target-spv=spv1.4 $(GLSLC_FLAGS)
%.spv: %.rahit
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 --target-spv=spv1.4 $(GLSLC_FLAGS)
%.spv: %.rchit
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 --target-spv=spv1.4 $(GLSLC_FLAGS)
%.spv: %.rmiss
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 --target-spv=spv1.4 $(GLSLC_FLAGS)
%.spv: %.rcall
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1 --target-spv=spv1.4 $(GLSLC_FLAGS)
glslc -o $@ $< -I$(INCLUDE_DIR) --target-env=vulkan1.1
all: $(SPV_OBJECTS)

View File

@@ -97,14 +97,6 @@ void DEBUG_CHANNEL_INIT(uvec3 id)
#endif
}
void DEBUG_CHANNEL_INIT_IMPLICIT_INSTANCE(uvec3 id, uint inst)
{
if (!DEBUG_SHADER_RING_ACTIVE)
return;
DEBUG_CHANNEL_ID = id;
DEBUG_CHANNEL_INSTANCE_COUNTER = inst;
}
void DEBUG_CHANNEL_UNLOCK_MESSAGE(RingBuffer buf, uint offset, uint num_words)
{
memoryBarrierBuffer();

View File

@@ -89,9 +89,6 @@ extern "C" {
#define VKD3D_CONFIG_FLAG_SHADER_CACHE_SYNC (1ull << 27)
#define VKD3D_CONFIG_FLAG_FORCE_RAW_VA_CBV (1ull << 28)
#define VKD3D_CONFIG_FLAG_ZERO_MEMORY_WORKAROUNDS_COMMITTED_BUFFER_UAV (1ull << 29)
#define VKD3D_CONFIG_FLAG_ALLOW_SBT_COLLECTION (1ull << 30)
#define VKD3D_CONFIG_FLAG_FORCE_NATIVE_FP16 (1ull << 31)
#define VKD3D_CONFIG_FLAG_USE_HOST_IMPORT_FALLBACK (1ull << 32)
typedef HRESULT (*PFN_vkd3d_signal_event)(HANDLE event);

View File

@@ -3644,8 +3644,8 @@ interface ID3D12CommandQueue : ID3D12Pageable
ID3D12Heap *heap,
UINT range_count,
const D3D12_TILE_RANGE_FLAGS *range_flags,
const UINT *heap_range_offsets,
const UINT *range_tile_counts,
UINT *heap_range_offsets,
UINT *range_tile_counts,
D3D12_TILE_MAPPING_FLAGS flags);
void CopyTileMappings(ID3D12Resource *dst_resource,

View File

@@ -660,7 +660,6 @@ struct vkd3d_shader_scan_info
bool has_side_effects;
bool needs_late_zs;
bool discards;
bool has_uav_counter;
unsigned int patch_vertex_count;
};
@@ -754,11 +753,7 @@ int vkd3d_shader_compile_dxbc(const struct vkd3d_shader_code *dxbc,
void vkd3d_shader_free_shader_code(struct vkd3d_shader_code *code);
int vkd3d_shader_parse_root_signature(const struct vkd3d_shader_code *dxbc,
struct vkd3d_versioned_root_signature_desc *root_signature,
vkd3d_shader_hash_t *compatibility_hash);
int vkd3d_shader_parse_root_signature_raw(const char *data, unsigned int data_size,
struct vkd3d_versioned_root_signature_desc *desc,
vkd3d_shader_hash_t *compatibility_hash);
struct vkd3d_versioned_root_signature_desc *root_signature);
void vkd3d_shader_free_root_signature(struct vkd3d_versioned_root_signature_desc *root_signature);
/* FIXME: Add support for returning error messages (ID3DBlob). */
@@ -784,65 +779,19 @@ void vkd3d_shader_free_shader_signature(struct vkd3d_shader_signature *signature
struct vkd3d_shader_library_entry_point
{
unsigned int identifier;
VkShaderStageFlagBits stage;
WCHAR *mangled_entry_point;
WCHAR *plain_entry_point;
char *real_entry_point;
VkShaderStageFlagBits stage;
};
enum vkd3d_shader_subobject_kind
{
/* Matches DXIL for simplicity. */
VKD3D_SHADER_SUBOBJECT_KIND_STATE_OBJECT_CONFIG = 0,
VKD3D_SHADER_SUBOBJECT_KIND_GLOBAL_ROOT_SIGNATURE = 1,
VKD3D_SHADER_SUBOBJECT_KIND_LOCAL_ROOT_SIGNATURE = 2,
VKD3D_SHADER_SUBOBJECT_KIND_SUBOBJECT_TO_EXPORTS_ASSOCIATION = 8,
VKD3D_SHADER_SUBOBJECT_KIND_RAYTRACING_SHADER_CONFIG = 9,
VKD3D_SHADER_SUBOBJECT_KIND_RAYTRACING_PIPELINE_CONFIG = 10,
VKD3D_SHADER_SUBOBJECT_KIND_HIT_GROUP = 11,
VKD3D_SHADER_SUBOBJECT_KIND_RAYTRACING_PIPELINE_CONFIG1 = 12,
};
struct vkd3d_shader_library_subobject
{
enum vkd3d_shader_subobject_kind kind;
unsigned int dxil_identifier;
/* All const pointers here point directly to the DXBC blob,
* so they do not need to be freed.
* Fortunately for us, the C strings are zero-terminated in the blob itself. */
/* In the blob, ASCII is used as identifier, where API uses wide strings, sigh ... */
const char *name;
union
{
D3D12_RAYTRACING_PIPELINE_CONFIG1 pipeline_config;
D3D12_RAYTRACING_SHADER_CONFIG shader_config;
D3D12_STATE_OBJECT_CONFIG object_config;
/* Duped strings because API wants wide strings for no good reason. */
D3D12_HIT_GROUP_DESC hit_group;
D3D12_DXIL_SUBOBJECT_TO_EXPORTS_ASSOCIATION association;
struct
{
const void *data;
size_t size;
} payload;
} data;
};
int vkd3d_shader_dxil_append_library_entry_points_and_subobjects(
int vkd3d_shader_dxil_append_library_entry_points(
const D3D12_DXIL_LIBRARY_DESC *library_desc,
unsigned int identifier,
struct vkd3d_shader_library_entry_point **entry_points,
size_t *entry_point_size, size_t *entry_point_count,
struct vkd3d_shader_library_subobject **subobjects,
size_t *subobjects_size, size_t *subobjects_count);
size_t *entry_point_size, size_t *entry_point_count);
void vkd3d_shader_dxil_free_library_entry_points(struct vkd3d_shader_library_entry_point *entry_points, size_t count);
void vkd3d_shader_dxil_free_library_subobjects(struct vkd3d_shader_library_subobject *subobjects, size_t count);
int vkd3d_shader_compile_dxil_export(const struct vkd3d_shader_code *dxil,
const char *export,
@@ -868,8 +817,7 @@ typedef int (*PFN_vkd3d_shader_compile_dxbc)(const struct vkd3d_shader_code *dxb
typedef void (*PFN_vkd3d_shader_free_shader_code)(struct vkd3d_shader_code *code);
typedef int (*PFN_vkd3d_shader_parse_root_signature)(const struct vkd3d_shader_code *dxbc,
struct vkd3d_versioned_root_signature_desc *root_signature,
vkd3d_shader_hash_t *compatibility_hash);
struct vkd3d_versioned_root_signature_desc *root_signature);
typedef void (*PFN_vkd3d_shader_free_root_signature)(struct vkd3d_versioned_root_signature_desc *root_signature);
typedef int (*PFN_vkd3d_shader_serialize_root_signature)(

View File

@@ -82,21 +82,6 @@ bool vkd3d_export_strequal(const WCHAR *a, const WCHAR *b)
return *a == *b;
}
bool vkd3d_export_strequal_mixed(const WCHAR *a, const char *b)
{
if (!a || !b)
return false;
while (*a != '\0' && *b != '\0')
{
if (*a != *b)
return false;
a++;
b++;
}
return *a == *b;
}
bool vkd3d_export_strequal_substr(const WCHAR *a, size_t expected_n, const WCHAR *b)
{
size_t n = 0;

View File

@@ -2755,9 +2755,8 @@ static int shader_parse_static_samplers(struct root_signature_parser_context *co
return VKD3D_OK;
}
int vkd3d_shader_parse_root_signature_raw(const char *data, unsigned int data_size,
struct vkd3d_versioned_root_signature_desc *desc,
vkd3d_shader_hash_t *compatibility_hash)
static int shader_parse_root_signature(const char *data, unsigned int data_size,
struct vkd3d_versioned_root_signature_desc *desc)
{
struct vkd3d_root_signature_desc *v_1_0 = &desc->v_1_0;
struct root_signature_parser_context context;
@@ -2765,8 +2764,6 @@ int vkd3d_shader_parse_root_signature_raw(const char *data, unsigned int data_si
const char *ptr = data;
int ret;
memset(desc, 0, sizeof(*desc));
context.data = data;
context.data_size = data_size;
@@ -2838,46 +2835,28 @@ int vkd3d_shader_parse_root_signature_raw(const char *data, unsigned int data_si
read_uint32(&ptr, &v_1_0->flags);
TRACE("Flags %#x.\n", v_1_0->flags);
if (compatibility_hash)
{
struct vkd3d_shader_code code = { data, data_size };
*compatibility_hash = vkd3d_shader_hash(&code);
}
return VKD3D_OK;
}
static int rts0_handler(const char *data, DWORD data_size, DWORD tag, void *context)
{
struct vkd3d_shader_code *payload = context;
struct vkd3d_versioned_root_signature_desc *desc = context;
if (tag != TAG_RTS0)
return VKD3D_OK;
payload->code = data;
payload->size = data_size;
return VKD3D_OK;
return shader_parse_root_signature(data, data_size, desc);
}
int vkd3d_shader_parse_root_signature(const struct vkd3d_shader_code *dxbc,
struct vkd3d_versioned_root_signature_desc *root_signature,
vkd3d_shader_hash_t *compatibility_hash)
struct vkd3d_versioned_root_signature_desc *root_signature)
{
struct vkd3d_shader_code raw_payload;
int ret;
TRACE("dxbc {%p, %zu}, root_signature %p.\n", dxbc->code, dxbc->size, root_signature);
memset(&raw_payload, 0, sizeof(raw_payload));
if ((ret = parse_dxbc(dxbc->code, dxbc->size, rts0_handler, &raw_payload)) < 0)
return ret;
if (!raw_payload.code)
return VKD3D_ERROR;
if ((ret = vkd3d_shader_parse_root_signature_raw(raw_payload.code, raw_payload.size,
root_signature, compatibility_hash)) < 0)
memset(root_signature, 0, sizeof(*root_signature));
if ((ret = parse_dxbc(dxbc->code, dxbc->size, rts0_handler, root_signature)) < 0)
{
vkd3d_shader_free_root_signature(root_signature);
return ret;

View File

@@ -1352,31 +1352,6 @@ void vkd3d_shader_dxil_free_library_entry_points(struct vkd3d_shader_library_ent
vkd3d_free(entry_points);
}
void vkd3d_shader_dxil_free_library_subobjects(struct vkd3d_shader_library_subobject *subobjects, size_t count)
{
size_t i, j;
for (i = 0; i < count; i++)
{
if (subobjects[i].kind == VKD3D_SHADER_SUBOBJECT_KIND_SUBOBJECT_TO_EXPORTS_ASSOCIATION)
{
for (j = 0; j < subobjects[i].data.association.NumExports; j++)
vkd3d_free((void*)subobjects[i].data.association.pExports[j]);
vkd3d_free((void*)subobjects[i].data.association.pExports);
vkd3d_free((void*)subobjects[i].data.association.SubobjectToAssociate);
}
else if (subobjects[i].kind == VKD3D_SHADER_SUBOBJECT_KIND_HIT_GROUP)
{
vkd3d_free((void*)subobjects[i].data.hit_group.HitGroupExport);
vkd3d_free((void*)subobjects[i].data.hit_group.AnyHitShaderImport);
vkd3d_free((void*)subobjects[i].data.hit_group.ClosestHitShaderImport);
vkd3d_free((void*)subobjects[i].data.hit_group.IntersectionShaderImport);
}
}
vkd3d_free(subobjects);
}
static VkShaderStageFlagBits convert_stage(dxil_spv_shader_stage stage)
{
/* Only interested in RT entry_points. There is no way yet to use lib_6_3+ for non-RT. */
@@ -1421,95 +1396,20 @@ static bool vkd3d_dxil_build_entry(struct vkd3d_shader_library_entry_point *entr
return true;
}
static void vkd3d_shader_dxil_copy_subobject(unsigned int identifier,
struct vkd3d_shader_library_subobject *subobject,
const dxil_spv_rdat_subobject *dxil_subobject)
{
unsigned int i;
/* Reuse same enums as DXIL. */
subobject->kind = (enum vkd3d_shader_subobject_kind)dxil_subobject->kind;
subobject->name = dxil_subobject->subobject_name;
subobject->dxil_identifier = identifier;
switch (dxil_subobject->kind)
{
case DXIL_SPV_RDAT_SUBOBJECT_KIND_GLOBAL_ROOT_SIGNATURE:
case DXIL_SPV_RDAT_SUBOBJECT_KIND_LOCAL_ROOT_SIGNATURE:
subobject->data.payload.data = dxil_subobject->payload;
subobject->data.payload.size = dxil_subobject->payload_size;
break;
case DXIL_SPV_RDAT_SUBOBJECT_KIND_RAYTRACING_PIPELINE_CONFIG:
/* Normalize the kind. */
subobject->kind = VKD3D_SHADER_SUBOBJECT_KIND_RAYTRACING_PIPELINE_CONFIG1;
subobject->data.pipeline_config.MaxTraceRecursionDepth = dxil_subobject->args[0];
subobject->data.pipeline_config.Flags = 0;
break;
case DXIL_SPV_RDAT_SUBOBJECT_KIND_RAYTRACING_PIPELINE_CONFIG1:
subobject->kind = VKD3D_SHADER_SUBOBJECT_KIND_RAYTRACING_PIPELINE_CONFIG1;
subobject->data.pipeline_config.MaxTraceRecursionDepth = dxil_subobject->args[0];
subobject->data.pipeline_config.Flags = dxil_subobject->args[1];
break;
case DXIL_SPV_RDAT_SUBOBJECT_KIND_RAYTRACING_SHADER_CONFIG:
subobject->data.shader_config.MaxPayloadSizeInBytes = dxil_subobject->args[0];
subobject->data.shader_config.MaxAttributeSizeInBytes = dxil_subobject->args[1];
break;
case DXIL_SPV_RDAT_SUBOBJECT_KIND_HIT_GROUP:
/* Enum aliases. */
subobject->data.hit_group.Type = (D3D12_HIT_GROUP_TYPE)dxil_subobject->hit_group_type;
assert(dxil_subobject->num_exports == 3);
/* Implementation simplifies a lot if we can reuse the D3D12 type here. */
subobject->data.hit_group.HitGroupExport = vkd3d_dup_entry_point(dxil_subobject->subobject_name);
subobject->data.hit_group.AnyHitShaderImport = dxil_subobject->exports[0] && *dxil_subobject->exports[0] != '\0' ?
vkd3d_dup_entry_point(dxil_subobject->exports[0]) : NULL;
subobject->data.hit_group.ClosestHitShaderImport = dxil_subobject->exports[1] && *dxil_subobject->exports[1] != '\0' ?
vkd3d_dup_entry_point(dxil_subobject->exports[1]) : NULL;
subobject->data.hit_group.IntersectionShaderImport = dxil_subobject->exports[2] && *dxil_subobject->exports[2] != '\0' ?
vkd3d_dup_entry_point(dxil_subobject->exports[2]) : NULL;
break;
case DXIL_SPV_RDAT_SUBOBJECT_KIND_STATE_OBJECT_CONFIG:
subobject->data.object_config.Flags = dxil_subobject->args[0];
break;
case DXIL_SPV_RDAT_SUBOBJECT_KIND_SUBOBJECT_TO_EXPORTS_ASSOCIATION:
assert(dxil_subobject->num_exports >= 1);
subobject->data.association.SubobjectToAssociate = vkd3d_dup_entry_point(dxil_subobject->exports[0]);
subobject->data.association.pExports = vkd3d_malloc((dxil_subobject->num_exports - 1) * sizeof(LPCWSTR));
subobject->data.association.NumExports = dxil_subobject->num_exports - 1;
for (i = 1; i < dxil_subobject->num_exports; i++)
subobject->data.association.pExports[i - 1] = vkd3d_dup_entry_point(dxil_subobject->exports[i]);
break;
default:
FIXME("Unrecognized RDAT subobject type: %u.\n", dxil_subobject->kind);
break;
}
}
int vkd3d_shader_dxil_append_library_entry_points_and_subobjects(
int vkd3d_shader_dxil_append_library_entry_points(
const D3D12_DXIL_LIBRARY_DESC *library_desc,
unsigned int identifier,
struct vkd3d_shader_library_entry_point **entry_points,
size_t *entry_point_size, size_t *entry_point_count,
struct vkd3d_shader_library_subobject **subobjects,
size_t *subobjects_size, size_t *subobjects_count)
size_t *entry_point_size, size_t *entry_point_count)
{
struct vkd3d_shader_library_entry_point new_entry;
struct vkd3d_shader_library_subobject *subobject;
dxil_spv_parsed_blob blob = NULL;
struct vkd3d_shader_code code;
dxil_spv_rdat_subobject sub;
dxil_spv_shader_stage stage;
const char *mangled_entry;
char *ascii_entry = NULL;
vkd3d_shader_hash_t hash;
unsigned int count, i, j;
unsigned int rdat_count;
unsigned int count, i;
int ret = VKD3D_OK;
memset(&new_entry, 0, sizeof(new_entry));
@@ -1530,8 +1430,6 @@ int vkd3d_shader_dxil_append_library_entry_points_and_subobjects(
goto end;
}
rdat_count = dxil_spv_parsed_blob_get_num_rdat_subobjects(blob);
if (library_desc->NumExports)
{
for (i = 0; i < library_desc->NumExports; i++)
@@ -1541,44 +1439,24 @@ int vkd3d_shader_dxil_append_library_entry_points_and_subobjects(
else
ascii_entry = vkd3d_strdup_w_utf8(library_desc->pExports[i].Name, 0);
/* An export can point to a subobject or an entry point. */
for (j = 0; j < rdat_count; j++)
stage = dxil_spv_parsed_blob_get_shader_stage_for_entry(blob, ascii_entry);
if (stage == DXIL_SPV_STAGE_UNKNOWN)
{
dxil_spv_parsed_blob_get_rdat_subobject(blob, j, &sub);
/* Subobject names are not mangled. */
if (strcmp(sub.subobject_name, ascii_entry) == 0)
break;
ret = VKD3D_ERROR_INVALID_ARGUMENT;
goto end;
}
if (j < rdat_count)
{
vkd3d_array_reserve((void**)subobjects, subobjects_size,
*subobjects_count + 1, sizeof(**subobjects));
subobject = &(*subobjects)[*subobjects_count];
vkd3d_shader_dxil_copy_subobject(identifier, subobject, &sub);
*subobjects_count += 1;
}
else
{
stage = dxil_spv_parsed_blob_get_shader_stage_for_entry(blob, ascii_entry);
if (stage == DXIL_SPV_STAGE_UNKNOWN)
{
ret = VKD3D_ERROR_INVALID_ARGUMENT;
goto end;
}
new_entry.real_entry_point = ascii_entry;
new_entry.plain_entry_point = vkd3d_wstrdup(library_desc->pExports[i].Name);
new_entry.mangled_entry_point = NULL;
new_entry.identifier = identifier;
new_entry.stage = convert_stage(stage);
ascii_entry = NULL;
new_entry.real_entry_point = ascii_entry;
new_entry.plain_entry_point = vkd3d_wstrdup(library_desc->pExports[i].Name);
new_entry.mangled_entry_point = NULL;
new_entry.identifier = identifier;
new_entry.stage = convert_stage(stage);
ascii_entry = NULL;
vkd3d_array_reserve((void**)entry_points, entry_point_size,
*entry_point_count + 1, sizeof(new_entry));
(*entry_points)[(*entry_point_count)++] = new_entry;
memset(&new_entry, 0, sizeof(new_entry));
}
vkd3d_array_reserve((void**)entry_points, entry_point_size,
*entry_point_count + 1, sizeof(new_entry));
(*entry_points)[(*entry_point_count)++] = new_entry;
memset(&new_entry, 0, sizeof(new_entry));
}
}
else
@@ -1611,21 +1489,6 @@ int vkd3d_shader_dxil_append_library_entry_points_and_subobjects(
(*entry_points)[(*entry_point_count)++] = new_entry;
memset(&new_entry, 0, sizeof(new_entry));
}
if (rdat_count)
{
/* All subobjects are also exported. */
vkd3d_array_reserve((void**)subobjects, subobjects_size,
*subobjects_count + rdat_count, sizeof(**subobjects));
for (i = 0; i < rdat_count; i++)
{
dxil_spv_parsed_blob_get_rdat_subobject(blob, i, &sub);
subobject = &(*subobjects)[*subobjects_count];
vkd3d_shader_dxil_copy_subobject(identifier, subobject, &sub);
*subobjects_count += 1;
}
}
}
end:

View File

@@ -1404,13 +1404,6 @@ static uint32_t vkd3d_spirv_build_op_logical_and(struct vkd3d_spirv_builder *bui
SpvOpLogicalAnd, result_type, operand0, operand1);
}
static uint32_t vkd3d_spirv_build_op_any(struct vkd3d_spirv_builder *builder,
uint32_t result_type, uint32_t operand0)
{
return vkd3d_spirv_build_op_tr1(builder, &builder->function_stream,
SpvOpAny, result_type, operand0);
}
static uint32_t vkd3d_spirv_build_op_iequal(struct vkd3d_spirv_builder *builder,
uint32_t result_type, uint32_t operand0, uint32_t operand1)
{
@@ -2330,8 +2323,6 @@ struct vkd3d_dxbc_compiler
vkd3d_shader_hash_t descriptor_qa_shader_hash;
#endif
uint32_t robust_physical_counter_func_id;
int compiler_error;
};
@@ -5531,22 +5522,31 @@ static const struct vkd3d_shader_global_binding *vkd3d_dxbc_compiler_get_global_
{
if (binding->flags & VKD3D_SHADER_BINDING_FLAG_RAW_VA)
{
uint32_t struct_id, array_type_id;
uint32_t counter_struct_id, pointer_struct_id, array_type_id;
counter_struct_id = vkd3d_spirv_get_type_id(builder, VKD3D_TYPE_UINT, 1);
counter_struct_id = vkd3d_spirv_build_op_type_struct(builder, &counter_struct_id, 1);
vkd3d_spirv_build_op_member_decorate1(builder, counter_struct_id, 0, SpvDecorationOffset, 0);
vkd3d_spirv_build_op_decorate(builder, counter_struct_id, SpvDecorationBlock, NULL, 0);
vkd3d_spirv_build_op_name(builder, counter_struct_id, "uav_ctr_t");
type_id = vkd3d_spirv_build_op_type_pointer(builder, SpvStorageClassPhysicalStorageBuffer, counter_struct_id);
type_id = vkd3d_spirv_get_type_id(builder, VKD3D_TYPE_UINT, 2);
array_type_id = vkd3d_spirv_build_op_type_runtime_array(builder, type_id);
vkd3d_spirv_build_op_decorate1(builder, array_type_id, SpvDecorationArrayStride, sizeof(uint64_t));
struct_id = vkd3d_spirv_build_op_type_struct(builder, &array_type_id, 1);
vkd3d_spirv_build_op_member_decorate1(builder, struct_id, 0, SpvDecorationOffset, 0);
vkd3d_spirv_build_op_member_decorate(builder, struct_id, 0, SpvDecorationNonWritable, NULL, 0);
vkd3d_spirv_build_op_decorate(builder, struct_id, SpvDecorationBufferBlock, NULL, 0);
vkd3d_spirv_build_op_name(builder, struct_id, "uav_ctrs_t");
pointer_struct_id = vkd3d_spirv_build_op_type_struct(builder, &array_type_id, 1);
vkd3d_spirv_build_op_member_decorate1(builder, pointer_struct_id, 0, SpvDecorationOffset, 0);
vkd3d_spirv_build_op_decorate(builder, pointer_struct_id, SpvDecorationBufferBlock, NULL, 0);
vkd3d_spirv_build_op_name(builder, pointer_struct_id, "uav_ctrs_t");
var_id = vkd3d_spirv_build_op_variable(builder, &builder->global_stream,
vkd3d_spirv_get_op_type_pointer(builder, storage_class, struct_id),
vkd3d_spirv_get_op_type_pointer(builder, storage_class, pointer_struct_id),
storage_class, 0);
vkd3d_spirv_build_op_decorate(builder, var_id, SpvDecorationAliasedPointer, NULL, 0);
vkd3d_spirv_enable_capability(builder, SpvCapabilityPhysicalStorageBufferAddresses);
}
else
@@ -5719,116 +5719,10 @@ static const struct vkd3d_shader_buffer_reference_type *vkd3d_dxbc_compiler_get_
static void vkd3d_dxbc_compiler_emit_descriptor_qa_checks(struct vkd3d_dxbc_compiler *compiler);
#endif
static void vkd3d_dxbc_compiler_emit_robust_physical_counter_func(struct vkd3d_dxbc_compiler *compiler)
{
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
uint32_t not_equal_vec_id, not_equal_id;
uint32_t merge_label_id, body_label_id;
uint32_t ptr_type_id, ptr_id;
uint32_t parameter_types[3];
uint32_t parameter_ids[3];
uint32_t phi_arguments[4];
uint32_t atomic_args[4];
uint32_t func_type_id;
uint32_t phi_result_id;
uint32_t uvec2_type;
uint32_t bvec2_type;
uint32_t result_id;
uint32_t bool_type;
uint32_t u32_type;
uint32_t label_id;
uint32_t zero_id;
unsigned int i;
bool_type = vkd3d_spirv_get_type_id(builder, VKD3D_TYPE_BOOL, 1);
bvec2_type = vkd3d_spirv_get_type_id(builder, VKD3D_TYPE_BOOL, 2);
u32_type = vkd3d_spirv_get_type_id(builder, VKD3D_TYPE_UINT, 1);
uvec2_type = vkd3d_spirv_get_type_id(builder, VKD3D_TYPE_UINT, 2);
for (i = 0; i < ARRAY_SIZE(parameter_types); i++)
parameter_types[i] = i == 0 ? uvec2_type : u32_type;
func_type_id = vkd3d_spirv_get_op_type_function(builder, u32_type,
parameter_types, ARRAY_SIZE(parameter_types));
compiler->robust_physical_counter_func_id = vkd3d_spirv_alloc_id(builder);
vkd3d_spirv_build_op_name(builder, compiler->robust_physical_counter_func_id, "robust_physical_counter_op");
vkd3d_spirv_build_op_function(builder, u32_type, compiler->robust_physical_counter_func_id,
SpvFunctionControlMaskNone, func_type_id);
for (i = 0; i < ARRAY_SIZE(parameter_ids); i++)
parameter_ids[i] = vkd3d_spirv_build_op_function_parameter(builder, i == 0 ? uvec2_type : u32_type);
vkd3d_spirv_build_op_name(builder, parameter_ids[0], "bda");
vkd3d_spirv_build_op_name(builder, parameter_ids[1], "direction");
vkd3d_spirv_build_op_name(builder, parameter_ids[2], "fixup");
label_id = vkd3d_spirv_alloc_id(builder);
merge_label_id = vkd3d_spirv_alloc_id(builder);
body_label_id = vkd3d_spirv_alloc_id(builder);
zero_id = vkd3d_dxbc_compiler_get_constant_uint_vector(compiler, 0, 2);
vkd3d_spirv_build_op_label(builder, label_id);
not_equal_vec_id = vkd3d_spirv_build_op_inotequal(builder, bvec2_type,
parameter_ids[0], zero_id);
not_equal_id = vkd3d_spirv_build_op_any(builder, bool_type, not_equal_vec_id);
vkd3d_spirv_build_op_selection_merge(builder, merge_label_id, SpvSelectionControlMaskNone);
vkd3d_spirv_build_op_branch_conditional(builder, not_equal_id, body_label_id, merge_label_id);
phi_arguments[1] = body_label_id;
phi_arguments[2] = vkd3d_dxbc_compiler_get_constant_uint(compiler, 0);
phi_arguments[3] = label_id;
{
vkd3d_spirv_build_op_label(builder, body_label_id);
ptr_type_id = vkd3d_spirv_get_op_type_pointer(builder, SpvStorageClassPhysicalStorageBuffer, u32_type);
ptr_id = vkd3d_spirv_build_op_bitcast(builder, ptr_type_id, parameter_ids[0]);
atomic_args[0] = ptr_id;
atomic_args[1] = vkd3d_dxbc_compiler_get_constant_uint(compiler, SpvScopeDevice);
atomic_args[2] = vkd3d_dxbc_compiler_get_constant_uint(compiler, SpvMemoryAccessMaskNone);
atomic_args[3] = parameter_ids[1];
result_id = vkd3d_spirv_build_op_trv(builder, &builder->function_stream,
SpvOpAtomicIAdd, u32_type,
atomic_args, ARRAY_SIZE(atomic_args));
phi_arguments[0] = vkd3d_spirv_build_op_iadd(builder, u32_type,
result_id, parameter_ids[2]);
vkd3d_spirv_build_op_branch(builder, merge_label_id);
}
vkd3d_spirv_build_op_label(builder, merge_label_id);
phi_result_id = vkd3d_spirv_build_op_trv(builder, &builder->function_stream,
SpvOpPhi, u32_type,
phi_arguments, ARRAY_SIZE(phi_arguments));
vkd3d_spirv_build_op_return_value(builder, phi_result_id);
vkd3d_spirv_build_op_function_end(builder);
vkd3d_spirv_enable_capability(builder, SpvCapabilityPhysicalStorageBufferAddresses);
}
static uint32_t vkd3d_dxbc_compiler_emit_robust_physical_counter(struct vkd3d_dxbc_compiler *compiler,
uint32_t bda_id, bool increment)
{
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
uint32_t u32_type;
uint32_t args[3];
u32_type = vkd3d_spirv_get_type_id(builder, VKD3D_TYPE_UINT, 1);
args[0] = bda_id;
args[1] = vkd3d_dxbc_compiler_get_constant_uint(compiler, increment ? 1u : -1u);
args[2] = vkd3d_dxbc_compiler_get_constant_uint(compiler, increment ? 0u : -1u);
return vkd3d_spirv_build_op_function_call(builder, u32_type,
compiler->robust_physical_counter_func_id,
args, ARRAY_SIZE(args));
}
static void vkd3d_dxbc_compiler_emit_initial_declarations(struct vkd3d_dxbc_compiler *compiler)
{
const struct vkd3d_shader_transform_feedback_info *xfb_info = compiler->shader_interface.xfb_info;
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
unsigned int i;
switch (compiler->shader_type)
{
@@ -5878,19 +5772,6 @@ static void vkd3d_dxbc_compiler_emit_initial_declarations(struct vkd3d_dxbc_comp
vkd3d_dxbc_compiler_emit_descriptor_qa_checks(compiler);
#endif
if (compiler->scan_info->has_uav_counter)
{
/* Check if we're expected to deal with RAW VAs. In this case we will enable BDA. */
for (i = 0; i < compiler->shader_interface.binding_count; i++)
{
if (compiler->shader_interface.bindings[i].flags & VKD3D_SHADER_BINDING_FLAG_RAW_VA)
{
vkd3d_dxbc_compiler_emit_robust_physical_counter_func(compiler);
break;
}
}
}
if (compiler->shader_type != VKD3D_SHADER_TYPE_HULL)
{
vkd3d_spirv_builder_begin_main_function(builder);
@@ -10138,7 +10019,6 @@ static void vkd3d_dxbc_compiler_emit_uav_counter_instruction(struct vkd3d_dxbc_c
const struct vkd3d_shader_resource_binding *binding;
uint32_t type_id, result_id, pointer_id, zero_id;
const struct vkd3d_symbol *resource_symbol;
bool check_post_decrement;
uint32_t operands[3];
SpvOp op;
@@ -10154,6 +10034,7 @@ static void vkd3d_dxbc_compiler_emit_uav_counter_instruction(struct vkd3d_dxbc_c
if (binding && (binding->flags & VKD3D_SHADER_BINDING_FLAG_RAW_VA))
{
uint32_t ctr_ptr_type_id = vkd3d_spirv_get_op_type_pointer(builder, SpvStorageClassPhysicalStorageBuffer, type_id);
uint32_t buf_ptr_type_id = vkd3d_spirv_get_op_type_pointer(builder, SpvStorageClassUniform, resource_symbol->info.resource.uav_counter_type_id);
uint32_t indices[2];
@@ -10168,10 +10049,8 @@ static void vkd3d_dxbc_compiler_emit_uav_counter_instruction(struct vkd3d_dxbc_c
resource_symbol->info.resource.uav_counter_type_id,
pointer_id, SpvMemoryAccessMaskNone);
result_id = vkd3d_dxbc_compiler_emit_robust_physical_counter(compiler, pointer_id,
instruction->handler_idx == VKD3DSIH_IMM_ATOMIC_ALLOC);
check_post_decrement = false;
pointer_id = vkd3d_spirv_build_op_access_chain1(builder,
ctr_ptr_type_id, pointer_id, zero_id);
}
else if (binding && (binding->flags & VKD3D_SHADER_BINDING_FLAG_BINDLESS))
{
@@ -10191,8 +10070,6 @@ static void vkd3d_dxbc_compiler_emit_uav_counter_instruction(struct vkd3d_dxbc_c
/* Need to mark the pointer argument itself as non-uniform. */
if (src->reg.modifier == VKD3DSPRM_NONUNIFORM)
vkd3d_dxbc_compiler_decorate_nonuniform(compiler, pointer_id);
check_post_decrement = true;
}
else
{
@@ -10200,25 +10077,19 @@ static void vkd3d_dxbc_compiler_emit_uav_counter_instruction(struct vkd3d_dxbc_c
pointer_id = vkd3d_spirv_build_op_image_texel_pointer(builder, ptr_type_id,
resource_symbol->info.resource.uav_counter_id, zero_id, zero_id);
check_post_decrement = true;
}
if (check_post_decrement)
operands[0] = pointer_id;
operands[1] = vkd3d_dxbc_compiler_get_constant_uint(compiler, SpvScopeDevice);
operands[2] = vkd3d_dxbc_compiler_get_constant_uint(compiler, memory_semantics);
result_id = vkd3d_spirv_build_op_trv(builder, &builder->function_stream,
op, type_id, operands, ARRAY_SIZE(operands));
if (op == SpvOpAtomicIDecrement)
{
operands[0] = pointer_id;
operands[1] = vkd3d_dxbc_compiler_get_constant_uint(compiler, SpvScopeDevice);
operands[2] = vkd3d_dxbc_compiler_get_constant_uint(compiler, memory_semantics);
result_id = vkd3d_spirv_build_op_trv(builder, &builder->function_stream,
op, type_id, operands, ARRAY_SIZE(operands));
if (op == SpvOpAtomicIDecrement)
{
/* SpvOpAtomicIDecrement returns the original value. */
result_id = vkd3d_spirv_build_op_isub(builder, type_id, result_id,
vkd3d_dxbc_compiler_get_constant_uint(compiler, 1));
}
/* SpvOpAtomicIDecrement returns the original value. */
result_id = vkd3d_spirv_build_op_isub(builder, type_id, result_id,
vkd3d_dxbc_compiler_get_constant_uint(compiler, 1));
}
vkd3d_dxbc_compiler_emit_store_dst(compiler, dst, result_id);
}

View File

@@ -477,7 +477,6 @@ static void vkd3d_shader_scan_record_uav_counter(struct vkd3d_shader_scan_info *
const struct vkd3d_shader_register *reg)
{
scan_info->has_side_effects = true;
scan_info->has_uav_counter = true;
vkd3d_shader_scan_set_register_flags(scan_info, VKD3DSPR_UAV,
reg->idx[0].offset, VKD3D_SHADER_UAV_FLAG_ATOMIC_COUNTER);
}

View File

@@ -19,8 +19,6 @@
#define VKD3D_DBG_CHANNEL VKD3D_DBG_CHANNEL_API
#include "vkd3d_private.h"
#define RT_TRACE TRACE
void vkd3d_acceleration_structure_build_info_cleanup(
struct vkd3d_acceleration_structure_build_info *info)
{
@@ -76,31 +74,19 @@ bool vkd3d_acceleration_structure_convert_inputs(const struct d3d12_device *devi
bool have_triangles, have_aabbs;
unsigned int i;
RT_TRACE("Converting inputs.\n");
RT_TRACE("=====================\n");
build_info = &info->build_info;
memset(build_info, 0, sizeof(*build_info));
build_info->sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR;
if (desc->Type == D3D12_RAYTRACING_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL)
{
build_info->type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR;
RT_TRACE("Top level build.\n");
}
else
{
build_info->type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
RT_TRACE("Bottom level build.\n");
}
build_info->flags = d3d12_build_flags_to_vk(desc->Flags);
if (desc->Flags & D3D12_RAYTRACING_ACCELERATION_STRUCTURE_BUILD_FLAG_PERFORM_UPDATE)
{
RT_TRACE("BUILD_FLAG_PERFORM_UPDATE.\n");
build_info->mode = VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR;
}
else
build_info->mode = VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR;
@@ -123,9 +109,6 @@ bool vkd3d_acceleration_structure_convert_inputs(const struct d3d12_device *devi
info->primitive_counts = info->primitive_counts_stack;
info->primitive_counts[0] = desc->NumDescs;
build_info->geometryCount = 1;
RT_TRACE(" ArrayOfPointers: %u.\n",
desc->DescsLayout == D3D12_ELEMENTS_LAYOUT_ARRAY_OF_POINTERS ? 1 : 0);
RT_TRACE(" NumDescs: %u.\n", info->primitive_counts[0]);
}
else
{
@@ -149,21 +132,13 @@ bool vkd3d_acceleration_structure_convert_inputs(const struct d3d12_device *devi
for (i = 0; i < desc->NumDescs; i++)
{
info->geometries[i].sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR;
RT_TRACE(" Geom %u:\n", i);
if (desc->DescsLayout == D3D12_ELEMENTS_LAYOUT_ARRAY_OF_POINTERS)
{
geom_desc = desc->ppGeometryDescs[i];
RT_TRACE(" ArrayOfPointers\n");
}
else
{
geom_desc = &desc->pGeometryDescs[i];
RT_TRACE(" PointerToArray\n");
}
info->geometries[i].flags = d3d12_geometry_flags_to_vk(geom_desc->Flags);
RT_TRACE(" Flags = #%x\n", geom_desc->Flags);
switch (geom_desc->Type)
{
@@ -180,26 +155,17 @@ bool vkd3d_acceleration_structure_convert_inputs(const struct d3d12_device *devi
triangles = &info->geometries[i].geometry.triangles;
triangles->sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR;
triangles->indexData.deviceAddress = geom_desc->Triangles.IndexBuffer;
if (geom_desc->Triangles.IndexFormat != DXGI_FORMAT_UNKNOWN)
if (geom_desc->Triangles.IndexBuffer)
{
if (!geom_desc->Triangles.IndexBuffer)
WARN("Application is using IndexBuffer = 0 and IndexFormat != UNKNOWN. Likely application bug.\n");
triangles->indexType =
geom_desc->Triangles.IndexFormat == DXGI_FORMAT_R16_UINT ?
VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
info->primitive_counts[i] = geom_desc->Triangles.IndexCount / 3;
RT_TRACE(" Indexed : Index count = %u (%u bits)\n",
geom_desc->Triangles.IndexCount,
triangles->indexType == VK_INDEX_TYPE_UINT16 ? 16 : 32);
RT_TRACE(" Vertex count: %u\n", geom_desc->Triangles.VertexCount);
RT_TRACE(" IBO VA: %"PRIx64".\n", geom_desc->Triangles.IndexBuffer);
}
else
{
info->primitive_counts[i] = geom_desc->Triangles.VertexCount / 3;
triangles->indexType = VK_INDEX_TYPE_NONE_KHR;
RT_TRACE(" Triangle list : Vertex count: %u\n", geom_desc->Triangles.VertexCount);
}
triangles->maxVertex = max(1, geom_desc->Triangles.VertexCount) - 1;
@@ -207,11 +173,6 @@ bool vkd3d_acceleration_structure_convert_inputs(const struct d3d12_device *devi
triangles->vertexFormat = vkd3d_internal_get_vk_format(device, geom_desc->Triangles.VertexFormat);
triangles->vertexData.deviceAddress = geom_desc->Triangles.VertexBuffer.StartAddress;
triangles->transformData.deviceAddress = geom_desc->Triangles.Transform3x4;
RT_TRACE(" Transform3x4: %s\n", geom_desc->Triangles.Transform3x4 ? "on" : "off");
RT_TRACE(" Vertex format: %s\n", debug_dxgi_format(geom_desc->Triangles.VertexFormat));
RT_TRACE(" VBO VA: %"PRIx64"\n", geom_desc->Triangles.VertexBuffer.StartAddress);
RT_TRACE(" Vertex stride: %"PRIu64" bytes\n", geom_desc->Triangles.VertexBuffer.StrideInBytes);
break;
case D3D12_RAYTRACING_GEOMETRY_TYPE_PROCEDURAL_PRIMITIVE_AABBS:
@@ -229,15 +190,12 @@ bool vkd3d_acceleration_structure_convert_inputs(const struct d3d12_device *devi
aabbs->stride = geom_desc->AABBs.AABBs.StrideInBytes;
aabbs->data.deviceAddress = geom_desc->AABBs.AABBs.StartAddress;
info->primitive_counts[i] = geom_desc->AABBs.AABBCount;
RT_TRACE(" AABB stride: %"PRIu64" bytes\n", geom_desc->AABBs.AABBs.StrideInBytes);
break;
default:
FIXME("Unsupported geometry type %u.\n", geom_desc->Type);
return false;
}
RT_TRACE(" Primitive count %u.\n", info->primitive_counts[i]);
}
}
@@ -251,8 +209,6 @@ bool vkd3d_acceleration_structure_convert_inputs(const struct d3d12_device *devi
}
build_info->pGeometries = info->geometries;
RT_TRACE("=====================\n");
return true;
}

View File

@@ -50,8 +50,6 @@ static const char *vkd3d_breadcrumb_command_type_to_str(enum vkd3d_breadcrumb_co
return "dispatch";
case VKD3D_BREADCRUMB_COMMAND_EXECUTE_INDIRECT:
return "execute_indirect";
case VKD3D_BREADCRUMB_COMMAND_EXECUTE_INDIRECT_TEMPLATE:
return "execute_indirect_template";
case VKD3D_BREADCRUMB_COMMAND_COPY:
return "copy";
case VKD3D_BREADCRUMB_COMMAND_RESOLVE:
@@ -84,8 +82,6 @@ static const char *vkd3d_breadcrumb_command_type_to_str(enum vkd3d_breadcrumb_co
return "root_desc";
case VKD3D_BREADCRUMB_COMMAND_ROOT_CONST:
return "root_const";
case VKD3D_BREADCRUMB_COMMAND_TAG:
return "tag";
default:
return "?";
@@ -310,10 +306,6 @@ static void vkd3d_breadcrumb_tracer_report_command_list(
{
ERR(" Set arg: %"PRIu64" (#%"PRIx64")\n", cmd->word_64bit, cmd->word_64bit);
}
else if (cmd->type == VKD3D_BREADCRUMB_COMMAND_TAG)
{
ERR(" Tag: %s\n", cmd->tag);
}
else
{
ERR(" Command: %s\n", vkd3d_breadcrumb_command_type_to_str(cmd->type));

File diff suppressed because it is too large.

View File

@@ -61,56 +61,12 @@ void vkd3d_shader_debug_ring_init_spec_constant(struct d3d12_device *device,
#define DEBUG_CHANNEL_WORD_COOKIE 0xdeadca70u
#define DEBUG_CHANNEL_WORD_MASK 0xfffffff0u
static const char *vkd3d_patch_command_token_str(enum vkd3d_patch_command_token token)
{
switch (token)
{
case VKD3D_PATCH_COMMAND_TOKEN_COPY_CONST_U32: return "RootConst";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_IBO_VA_LO: return "IBO VA LO";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_IBO_VA_HI: return "IBO VA HI";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_IBO_SIZE: return "IBO Size";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_INDEX_FORMAT: return "IBO Type";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_VBO_VA_LO: return "VBO VA LO";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_VBO_VA_HI: return "VBO VA HI";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_VBO_SIZE: return "VBO Size";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_VBO_STRIDE: return "VBO Stride";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_ROOT_VA_LO: return "ROOT VA LO";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_ROOT_VA_HI: return "ROOT VA HI";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_VERTEX_COUNT: return "Vertex Count";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_INDEX_COUNT: return "Index Count";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_INSTANCE_COUNT: return "Instance Count";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_FIRST_INDEX: return "First Index";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_FIRST_VERTEX: return "First Vertex";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_FIRST_INSTANCE: return "First Instance";
case VKD3D_PATCH_COMMAND_TOKEN_COPY_VERTEX_OFFSET: return "Vertex Offset";
default: return "???";
}
}
static bool vkd3d_patch_command_token_is_hex(enum vkd3d_patch_command_token token)
{
switch (token)
{
case VKD3D_PATCH_COMMAND_TOKEN_COPY_IBO_VA_LO:
case VKD3D_PATCH_COMMAND_TOKEN_COPY_IBO_VA_HI:
case VKD3D_PATCH_COMMAND_TOKEN_COPY_VBO_VA_LO:
case VKD3D_PATCH_COMMAND_TOKEN_COPY_VBO_VA_HI:
case VKD3D_PATCH_COMMAND_TOKEN_COPY_ROOT_VA_LO:
case VKD3D_PATCH_COMMAND_TOKEN_COPY_ROOT_VA_HI:
return true;
default:
return false;
}
}
static bool vkd3d_shader_debug_ring_print_message(struct vkd3d_shader_debug_ring *ring,
uint32_t word_offset, uint32_t message_word_count)
{
uint32_t i, debug_instance, debug_thread_id[3], fmt;
char message_buffer[4096];
uint64_t shader_hash;
size_t len, avail;
if (message_word_count < 8)
{
@@ -124,107 +80,52 @@ static bool vkd3d_shader_debug_ring_print_message(struct vkd3d_shader_debug_ring
debug_thread_id[i] = READ_RING_WORD(word_offset + 4 + i);
fmt = READ_RING_WORD(word_offset + 7);
snprintf(message_buffer, sizeof(message_buffer), "Shader: %"PRIx64": Instance %u, ID (%u, %u, %u):",
shader_hash, debug_instance,
debug_thread_id[0], debug_thread_id[1], debug_thread_id[2]);
word_offset += 8;
message_word_count -= 8;
if (shader_hash == 0)
for (i = 0; i < message_word_count; i++)
{
/* We got this from our internal debug shaders. Pretty-print.
* Make sure the log is sortable for easier debug.
* TODO: Might consider a callback system that listeners from different subsystems can listen to and print their own messages,
* but that is overengineering at this time ... */
snprintf(message_buffer, sizeof(message_buffer), "ExecuteIndirect: GlobalCommandIndex %010u, Debug tag %010u, DrawID %04u (ThreadID %04u): ",
debug_instance, debug_thread_id[0], debug_thread_id[1], debug_thread_id[2]);
if (message_word_count == 2)
union
{
len = strlen(message_buffer);
avail = sizeof(message_buffer) - len;
snprintf(message_buffer + len, avail, "DrawCount %u, MaxDrawCount %u",
READ_RING_WORD(word_offset + 0),
READ_RING_WORD(word_offset + 1));
}
else if (message_word_count == 4)
{
union { uint32_t u32; float f32; int32_t s32; } value;
enum vkd3d_patch_command_token token;
uint32_t dst_offset;
uint32_t src_offset;
float f32;
uint32_t u32;
int32_t i32;
} u;
const char *delim;
size_t len, avail;
u.u32 = READ_RING_WORD(word_offset + i);
len = strlen(message_buffer);
avail = sizeof(message_buffer) - len;
len = strlen(message_buffer);
if (len + 1 >= sizeof(message_buffer))
break;
avail = sizeof(message_buffer) - len;
token = READ_RING_WORD(word_offset + 0);
dst_offset = READ_RING_WORD(word_offset + 1);
src_offset = READ_RING_WORD(word_offset + 2);
value.u32 = READ_RING_WORD(word_offset + 3);
if (vkd3d_patch_command_token_is_hex(token))
{
snprintf(message_buffer + len, avail, "%s <- #%08x",
vkd3d_patch_command_token_str(token), value.u32);
}
else if (token == VKD3D_PATCH_COMMAND_TOKEN_COPY_CONST_U32)
{
snprintf(message_buffer + len, avail, "%s <- {hex #%08x, s32 %d, f32 %f}",
vkd3d_patch_command_token_str(token), value.u32, value.s32, value.f32);
}
else
{
snprintf(message_buffer + len, avail, "%s <- %d",
vkd3d_patch_command_token_str(token), value.s32);
}
len = strlen(message_buffer);
avail = sizeof(message_buffer) - len;
snprintf(message_buffer + len, avail, " (dst offset %u, src offset %u)", dst_offset, src_offset);
}
}
else
{
snprintf(message_buffer, sizeof(message_buffer), "Shader: %"PRIx64": Instance %010u, ID (%u, %u, %u):",
shader_hash, debug_instance,
debug_thread_id[0], debug_thread_id[1], debug_thread_id[2]);
for (i = 0; i < message_word_count; i++)
{
union
{
float f32;
uint32_t u32;
int32_t i32;
} u;
const char *delim;
u.u32 = READ_RING_WORD(word_offset + i);
len = strlen(message_buffer);
if (len + 1 >= sizeof(message_buffer))
break;
avail = sizeof(message_buffer) - len;
delim = i == 0 ? " " : ", ";
delim = i == 0 ? " " : ", ";
#define VKD3D_DEBUG_CHANNEL_FMT_HEX 0u
#define VKD3D_DEBUG_CHANNEL_FMT_I32 1u
#define VKD3D_DEBUG_CHANNEL_FMT_F32 2u
switch ((fmt >> (2u * i)) & 3u)
{
case VKD3D_DEBUG_CHANNEL_FMT_HEX:
snprintf(message_buffer + len, avail, "%s#%x", delim, u.u32);
break;
switch ((fmt >> (2u * i)) & 3u)
{
case VKD3D_DEBUG_CHANNEL_FMT_HEX:
snprintf(message_buffer + len, avail, "%s#%x", delim, u.u32);
break;
case VKD3D_DEBUG_CHANNEL_FMT_I32:
snprintf(message_buffer + len, avail, "%s%d", delim, u.i32);
break;
case VKD3D_DEBUG_CHANNEL_FMT_I32:
snprintf(message_buffer + len, avail, "%s%d", delim, u.i32);
break;
case VKD3D_DEBUG_CHANNEL_FMT_F32:
snprintf(message_buffer + len, avail, "%s%f", delim, u.f32);
break;
case VKD3D_DEBUG_CHANNEL_FMT_F32:
snprintf(message_buffer + len, avail, "%s%f", delim, u.f32);
break;
default:
snprintf(message_buffer + len, avail, "%s????", delim);
break;
}
default:
snprintf(message_buffer + len, avail, "%s????", delim);
break;
}
}

View File

@@ -90,10 +90,6 @@ static const struct vkd3d_optional_extension_info optional_device_extensions[] =
VK_EXTENSION(KHR_UNIFORM_BUFFER_STANDARD_LAYOUT, KHR_uniform_buffer_standard_layout),
VK_EXTENSION(KHR_MAINTENANCE_4, KHR_maintenance4),
VK_EXTENSION(KHR_FRAGMENT_SHADER_BARYCENTRIC, KHR_fragment_shader_barycentric),
#ifdef _WIN32
VK_EXTENSION(KHR_EXTERNAL_MEMORY_WIN32, KHR_external_memory_win32),
VK_EXTENSION(KHR_EXTERNAL_SEMAPHORE_WIN32, KHR_external_semaphore_win32),
#endif
/* EXT extensions */
VK_EXTENSION(EXT_CALIBRATED_TIMESTAMPS, EXT_calibrated_timestamps),
VK_EXTENSION(EXT_CONDITIONAL_RENDERING, EXT_conditional_rendering),
@@ -487,14 +483,6 @@ static void vkd3d_init_debug_messenger_callback(struct vkd3d_instance *instance)
instance->vk_debug_callback = callback;
}
/* Could be a flag style enum if needed. */
enum vkd3d_application_feature_override
{
VKD3D_APPLICATION_FEATURE_OVERRIDE_NONE = 0,
VKD3D_APPLICATION_FEATURE_OVERRIDE_PROMOTE_DXR_TO_ULTIMATE,
};
static enum vkd3d_application_feature_override vkd3d_application_feature_override;
uint64_t vkd3d_config_flags;
struct vkd3d_shader_quirk_info vkd3d_shader_quirk_info;
@@ -504,7 +492,6 @@ struct vkd3d_instance_application_meta
const char *name;
uint64_t global_flags_add;
uint64_t global_flags_remove;
enum vkd3d_application_feature_override override;
};
static const struct vkd3d_instance_application_meta application_override[] = {
/* MSVC fails to compile empty array. */
@@ -515,8 +502,7 @@ static const struct vkd3d_instance_application_meta application_override[] = {
* This works okay with zerovram on first game boot, but not later, since this memory is guaranteed to be recycled.
* Game also relies on indirectly modifying CBV root descriptors, which means we are forced to rely on RAW_VA_CBV. */
{ VKD3D_STRING_COMPARE_EXACT, "HaloInfinite.exe",
VKD3D_CONFIG_FLAG_ZERO_MEMORY_WORKAROUNDS_COMMITTED_BUFFER_UAV | VKD3D_CONFIG_FLAG_FORCE_RAW_VA_CBV |
VKD3D_CONFIG_FLAG_USE_HOST_IMPORT_FALLBACK, 0 },
VKD3D_CONFIG_FLAG_ZERO_MEMORY_WORKAROUNDS_COMMITTED_BUFFER_UAV | VKD3D_CONFIG_FLAG_FORCE_RAW_VA_CBV, 0 },
/* Shadow of the Tomb Raider (750920).
* Invariant workarounds actually cause more issues than they resolve on NV.
* RADV already has workarounds by default.
@@ -532,20 +518,6 @@ static const struct vkd3d_instance_application_meta application_override[] = {
/* Serious Sam 4 (257420).
* Invariant workarounds cause graphical glitches when rendering foliage on NV. */
{ VKD3D_STRING_COMPARE_EXACT, "Sam4.exe", VKD3D_CONFIG_FLAG_FORCE_NO_INVARIANT_POSITION, 0 },
/* Cyberpunk 2077 (1091500). */
{ VKD3D_STRING_COMPARE_EXACT, "Cyberpunk2077.exe", VKD3D_CONFIG_FLAG_ALLOW_SBT_COLLECTION, 0 },
/* Resident Evil: Village (1196590).
* Game relies on mesh + sampler feedback to be exposed to use DXR.
* Likely used as a proxy for Turing+ to avoid potential software fallbacks on Pascal. */
{ VKD3D_STRING_COMPARE_EXACT, "re8.exe",
VKD3D_CONFIG_FLAG_FORCE_NATIVE_FP16, 0, VKD3D_APPLICATION_FEATURE_OVERRIDE_PROMOTE_DXR_TO_ULTIMATE },
/* Resident Evil 2 remake (883710). Same as RE: Village. */
{ VKD3D_STRING_COMPARE_EXACT, "re2.exe",
VKD3D_CONFIG_FLAG_FORCE_NATIVE_FP16, 0, VKD3D_APPLICATION_FEATURE_OVERRIDE_PROMOTE_DXR_TO_ULTIMATE },
{ VKD3D_STRING_COMPARE_EXACT, "re3.exe",
VKD3D_CONFIG_FLAG_FORCE_NATIVE_FP16, 0, VKD3D_APPLICATION_FEATURE_OVERRIDE_PROMOTE_DXR_TO_ULTIMATE },
{ VKD3D_STRING_COMPARE_EXACT, "re7.exe",
VKD3D_CONFIG_FLAG_FORCE_NATIVE_FP16, 0, VKD3D_APPLICATION_FEATURE_OVERRIDE_PROMOTE_DXR_TO_ULTIMATE },
{ VKD3D_STRING_COMPARE_NEVER, NULL, 0, 0 }
};
@@ -596,7 +568,6 @@ static void vkd3d_instance_apply_application_workarounds(void)
vkd3d_config_flags &= ~application_override[i].global_flags_remove;
INFO("Detected game %s, adding config 0x%"PRIx64", removing masks 0x%"PRIx64".\n",
app, application_override[i].global_flags_add, application_override[i].global_flags_remove);
vkd3d_application_feature_override = application_override[i].override;
break;
}
}
@@ -693,7 +664,6 @@ static const struct vkd3d_debug_option vkd3d_config_options[] =
{"pipeline_library_app_cache", VKD3D_CONFIG_FLAG_PIPELINE_LIBRARY_APP_CACHE_ONLY},
{"shader_cache_sync", VKD3D_CONFIG_FLAG_SHADER_CACHE_SYNC},
{"force_raw_va_cbv", VKD3D_CONFIG_FLAG_FORCE_RAW_VA_CBV},
{"allow_sbt_collection", VKD3D_CONFIG_FLAG_ALLOW_SBT_COLLECTION},
};
static void vkd3d_config_flags_init_once(void)
@@ -2905,6 +2875,8 @@ static void d3d12_device_destroy(struct d3d12_device *device)
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
size_t i, j;
vkd3d_descriptor_update_ring_cleanup(&device->descriptor_update_ring, device);
for (i = 0; i < VKD3D_SCRATCH_POOL_KIND_COUNT; i++)
for (j = 0; j < device->scratch_pools[i].scratch_buffer_count; j++)
d3d12_device_destroy_scratch_buffer(device, &device->scratch_pools[i].scratch_buffers[j]);
@@ -4038,10 +4010,20 @@ static void STDMETHODCALLTYPE d3d12_device_CreateConstantBufferView(d3d12_device
const D3D12_CONSTANT_BUFFER_VIEW_DESC *desc, D3D12_CPU_DESCRIPTOR_HANDLE descriptor)
{
struct d3d12_device *device = impl_from_ID3D12Device(iface);
vkd3d_cpu_descriptor_va_t src = 0, dst = 0;
TRACE("iface %p, desc %p, descriptor %#lx.\n", iface, desc, descriptor.ptr);
if (descriptor.ptr & VKD3D_RESOURCE_DESC_DEFER_COPY_MASK)
{
dst = descriptor.ptr;
descriptor.ptr = src = vkd3d_descriptor_update_ring_allocate_scratch(&device->descriptor_update_ring);
}
d3d12_desc_create_cbv(descriptor.ptr, device, desc);
if (dst)
vkd3d_descriptor_update_ring_push_copy(&device->descriptor_update_ring, device, src, dst);
}
static void STDMETHODCALLTYPE d3d12_device_CreateShaderResourceView(d3d12_device_iface *iface,
@@ -4049,11 +4031,21 @@ static void STDMETHODCALLTYPE d3d12_device_CreateShaderResourceView(d3d12_device
D3D12_CPU_DESCRIPTOR_HANDLE descriptor)
{
struct d3d12_device *device = impl_from_ID3D12Device(iface);
vkd3d_cpu_descriptor_va_t src = 0, dst = 0;
TRACE("iface %p, resource %p, desc %p, descriptor %#lx.\n",
iface, resource, desc, descriptor.ptr);
if (descriptor.ptr & VKD3D_RESOURCE_DESC_DEFER_COPY_MASK)
{
dst = descriptor.ptr;
descriptor.ptr = src = vkd3d_descriptor_update_ring_allocate_scratch(&device->descriptor_update_ring);
}
d3d12_desc_create_srv(descriptor.ptr, device, impl_from_ID3D12Resource(resource), desc);
if (dst)
vkd3d_descriptor_update_ring_push_copy(&device->descriptor_update_ring, device, src, dst);
}
VKD3D_THREAD_LOCAL struct D3D12_UAV_INFO *d3d12_uav_info = NULL;
@@ -4065,12 +4057,19 @@ static void STDMETHODCALLTYPE d3d12_device_CreateUnorderedAccessView(d3d12_devic
VkImageViewAddressPropertiesNVX out_info = { VK_STRUCTURE_TYPE_IMAGE_VIEW_ADDRESS_PROPERTIES_NVX };
VkImageViewHandleInfoNVX imageViewHandleInfo = { VK_STRUCTURE_TYPE_IMAGE_VIEW_HANDLE_INFO_NVX };
const struct vkd3d_vk_device_procs *vk_procs;
vkd3d_cpu_descriptor_va_t src = 0, dst = 0;
VkResult vr;
struct d3d12_resource *d3d12_resource_ = impl_from_ID3D12Resource(resource);
struct d3d12_device *device = impl_from_ID3D12Device(iface);
TRACE("iface %p, resource %p, counter_resource %p, desc %p, descriptor %#lx.\n",
iface, resource, counter_resource, desc, descriptor.ptr);
if (descriptor.ptr & VKD3D_RESOURCE_DESC_DEFER_COPY_MASK)
{
dst = descriptor.ptr;
descriptor.ptr = src = vkd3d_descriptor_update_ring_allocate_scratch(&device->descriptor_update_ring);
}
d3d12_desc_create_uav(descriptor.ptr,
device, d3d12_resource_,
impl_from_ID3D12Resource(counter_resource), desc);
@@ -4097,6 +4096,9 @@ static void STDMETHODCALLTYPE d3d12_device_CreateUnorderedAccessView(d3d12_devic
/* Set this to null so that subsequent calls to this API wont update the previous pointer. */
d3d12_uav_info = NULL;
}
if (dst)
vkd3d_descriptor_update_ring_push_copy(&device->descriptor_update_ring, device, src, dst);
}
static void STDMETHODCALLTYPE d3d12_device_CreateRenderTargetView(d3d12_device_iface *iface,
@@ -4246,7 +4248,10 @@ static void STDMETHODCALLTYPE d3d12_device_CopyDescriptorsSimple(d3d12_device_if
const D3D12_CPU_DESCRIPTOR_HANDLE src_descriptor_range_offset,
D3D12_DESCRIPTOR_HEAP_TYPE descriptor_heap_type)
{
vkd3d_cpu_descriptor_va_t src, dst;
struct d3d12_device *device;
UINT i;
TRACE("iface %p, descriptor_count %u, dst_descriptor_range_offset %#lx, "
"src_descriptor_range_offset %#lx, descriptor_heap_type %#x.\n",
iface, descriptor_count, dst_descriptor_range_offset.ptr, src_descriptor_range_offset.ptr,
@@ -4254,7 +4259,21 @@ static void STDMETHODCALLTYPE d3d12_device_CopyDescriptorsSimple(d3d12_device_if
device = unsafe_impl_from_ID3D12Device(iface);
if (descriptor_heap_type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
if (descriptor_heap_type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV &&
(dst_descriptor_range_offset.ptr & VKD3D_RESOURCE_DESC_DEFER_COPY_MASK))
{
src = src_descriptor_range_offset.ptr;
dst = dst_descriptor_range_offset.ptr;
for (i = 0; i < descriptor_count; i++)
{
vkd3d_descriptor_update_ring_push_copy(&device->descriptor_update_ring,
device, src, dst);
src += VKD3D_RESOURCE_DESC_INCREMENT;
dst += VKD3D_RESOURCE_DESC_INCREMENT;
}
}
else if (descriptor_heap_type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
descriptor_heap_type == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER)
{
/* Fast and hot path. */
@@ -4405,263 +4424,19 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateSharedHandle(d3d12_device_if
ID3D12DeviceChild *object, const SECURITY_ATTRIBUTES *attributes, DWORD access,
const WCHAR *name, HANDLE *handle)
{
#ifdef _WIN32
struct d3d12_device *device = impl_from_ID3D12Device(iface);
const struct vkd3d_vk_device_procs *vk_procs;
struct DxvkSharedTextureMetadata metadata;
ID3D12Resource *resource_iface;
ID3D12Fence *fence_iface;
vk_procs = &device->vk_procs;
TRACE("iface %p, object %p, attributes %p, access %#x, name %s, handle %p\n",
FIXME("iface %p, object %p, attributes %p, access %#x, name %s, handle %p stub!\n",
iface, object, attributes, access, debugstr_w(name), handle);
if (SUCCEEDED(ID3D12DeviceChild_QueryInterface(object, &IID_ID3D12Resource, (void**)&resource_iface)))
{
struct d3d12_resource *resource = impl_from_ID3D12Resource(resource_iface);
VkMemoryGetWin32HandleInfoKHR win32_handle_info;
VkResult vr;
if (!(resource->heap_flags & D3D12_HEAP_FLAG_SHARED))
{
ID3D12Resource_Release(resource_iface);
return DXGI_ERROR_INVALID_CALL;
}
if (attributes)
FIXME("attributes %p not handled.\n", attributes);
if (access)
FIXME("access %#x not handled.\n", access);
if (name)
FIXME("name %s not handled.\n", debugstr_w(name));
win32_handle_info.sType = VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR;
win32_handle_info.pNext = NULL;
win32_handle_info.memory = resource->mem.device_allocation.vk_memory;
win32_handle_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT;
vr = VK_CALL(vkGetMemoryWin32HandleKHR(device->vk_device, &win32_handle_info, handle));
if (vr == VK_SUCCESS)
{
if (resource->desc.Dimension != D3D12_RESOURCE_DIMENSION_TEXTURE2D)
{
FIXME("Shared texture metadata structure only supports 2D textures.");
}
else
{
metadata.Width = resource->desc.Width;
metadata.Height = resource->desc.Height;
metadata.MipLevels = resource->desc.MipLevels;
metadata.ArraySize = resource->desc.DepthOrArraySize;
metadata.Format = resource->desc.Format;
metadata.SampleDesc = resource->desc.SampleDesc;
metadata.Usage = D3D11_USAGE_DEFAULT;
metadata.BindFlags = D3D11_BIND_SHADER_RESOURCE;
metadata.CPUAccessFlags = 0;
metadata.MiscFlags = D3D11_RESOURCE_MISC_SHARED_NTHANDLE;
if (resource->desc.Flags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET)
metadata.BindFlags |= D3D11_BIND_RENDER_TARGET;
if (resource->desc.Flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)
metadata.BindFlags |= D3D11_BIND_DEPTH_STENCIL;
if (resource->desc.Flags & D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS)
metadata.BindFlags |= D3D11_BIND_UNORDERED_ACCESS;
if (resource->desc.Flags & D3D12_RESOURCE_FLAG_DENY_SHADER_RESOURCE)
metadata.BindFlags &= ~D3D11_BIND_SHADER_RESOURCE;
if (!vkd3d_set_shared_metadata(*handle, &metadata, sizeof(metadata)))
ERR("Failed to set metadata for shared resource, importing created handle will fail.\n");
}
}
ID3D12Resource_Release(resource_iface);
return vr ? E_FAIL : S_OK;
}
if (SUCCEEDED(ID3D12DeviceChild_QueryInterface(object, &IID_ID3D12Fence, (void**)&fence_iface)))
{
VkSemaphoreGetWin32HandleInfoKHR win32_handle_info;
struct d3d12_shared_fence *fence;
VkResult vr;
if (!is_shared_ID3D12Fence(fence_iface))
{
ID3D12Fence_Release(fence_iface);
return DXGI_ERROR_INVALID_CALL;
}
fence = shared_impl_from_ID3D12Fence(fence_iface);
if (attributes)
FIXME("attributes %p not handled\n", attributes);
if (access)
FIXME("access %#x not handled\n", access);
if (name)
FIXME("name %s not handled\n", debugstr_w(name));
win32_handle_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR;
win32_handle_info.pNext = NULL;
win32_handle_info.semaphore = fence->timeline_semaphore;
win32_handle_info.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT;
vr = VK_CALL(vkGetSemaphoreWin32HandleKHR(device->vk_device, &win32_handle_info, handle));
ID3D12Fence_Release(fence_iface);
return vr ? E_FAIL : S_OK;
}
FIXME("Creating shared handle for type of object %p unsupported.\n", object);
return E_NOTIMPL;
#else
FIXME("CreateSharedHandle can only be implemented in native Win32.\n");
return E_NOTIMPL;
#endif
}
#ifdef _WIN32
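/* Heuristic based on Wine's shared resource convention rather than any hard guarantee:
 * NT handles are multiples of 4, while KMT-style handles have the form 4n + 2 with
 * bit 30 (0x40000000) set, which is exactly what the check below tests. */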
static inline bool handle_is_kmt_style(HANDLE handle)
{
return ((ULONG_PTR)handle & 0x40000000) && ((ULONG_PTR)handle - 2) % 4 == 0;
}
#endif
static HRESULT STDMETHODCALLTYPE d3d12_device_OpenSharedHandle(d3d12_device_iface *iface,
HANDLE handle, REFIID riid, void **object)
{
#ifdef _WIN32
struct d3d12_device *device = impl_from_ID3D12Device(iface);
const struct vkd3d_vk_device_procs *vk_procs;
HRESULT hr;
vk_procs = &device->vk_procs;
TRACE("iface %p, handle %p, riid %s, object %p\n",
FIXME("iface %p, handle %p, riid %s, object %p stub!\n",
iface, handle, debugstr_guid(riid), object);
if (IsEqualGUID(riid, &IID_ID3D12Resource))
{
struct DxvkSharedTextureMetadata metadata;
D3D12_HEAP_PROPERTIES heap_props;
struct d3d12_resource *resource;
D3D12_RESOURCE_DESC1 desc;
bool kmt_handle = false;
if (handle_is_kmt_style(handle))
{
handle = vkd3d_open_kmt_handle(handle);
kmt_handle = true;
if (handle == INVALID_HANDLE_VALUE)
{
WARN("Failed to open KMT-style ID3D12Resource shared handle.\n");
*object = NULL;
return E_INVALIDARG;
}
}
if (!vkd3d_get_shared_metadata(handle, &metadata, sizeof(metadata), NULL))
{
WARN("Failed to get ID3D12Resource shared handle metadata.\n");
if (kmt_handle)
CloseHandle(handle);
*object = NULL;
return E_INVALIDARG;
}
desc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;
desc.Alignment = 0;
desc.Width = metadata.Width;
desc.Height = metadata.Height;
desc.DepthOrArraySize = metadata.ArraySize;
desc.MipLevels = metadata.MipLevels;
desc.Format = metadata.Format;
desc.SampleDesc = metadata.SampleDesc;
switch (metadata.TextureLayout)
{
case D3D11_TEXTURE_LAYOUT_UNDEFINED: desc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN; break;
case D3D11_TEXTURE_LAYOUT_ROW_MAJOR: desc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR; break;
case D3D11_TEXTURE_LAYOUT_64K_STANDARD_SWIZZLE: desc.Layout = D3D12_TEXTURE_LAYOUT_64KB_STANDARD_SWIZZLE; break;
default: desc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
}
desc.Flags = D3D12_RESOURCE_FLAG_ALLOW_SIMULTANEOUS_ACCESS;
if (metadata.BindFlags & D3D11_BIND_RENDER_TARGET)
desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
if (metadata.BindFlags & D3D11_BIND_DEPTH_STENCIL)
desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
if (metadata.BindFlags & D3D11_BIND_UNORDERED_ACCESS)
desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
if ((metadata.BindFlags & D3D11_BIND_DEPTH_STENCIL) && !(metadata.BindFlags & D3D11_BIND_SHADER_RESOURCE))
desc.Flags |= D3D12_RESOURCE_FLAG_DENY_SHADER_RESOURCE;
desc.SamplerFeedbackMipRegion.Width = 0;
desc.SamplerFeedbackMipRegion.Height = 0;
desc.SamplerFeedbackMipRegion.Depth = 0;
heap_props.Type = D3D12_HEAP_TYPE_DEFAULT;
heap_props.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
heap_props.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
heap_props.CreationNodeMask = 0;
heap_props.VisibleNodeMask = 0;
hr = d3d12_resource_create_committed(device, &desc, &heap_props,
D3D12_HEAP_FLAG_SHARED, D3D12_RESOURCE_STATE_COMMON, NULL, handle, &resource);
if (kmt_handle)
CloseHandle(handle);
if (FAILED(hr))
{
WARN("Failed to open shared ID3D12Resource, hr %#x.\n", hr);
*object = NULL;
return hr;
}
return return_interface(&resource->ID3D12Resource_iface, &IID_ID3D12Resource, riid, object);
}
if (IsEqualGUID(riid, &IID_ID3D12Fence))
{
VkImportSemaphoreWin32HandleInfoKHR import_info;
struct d3d12_shared_fence *fence;
VkResult vr;
hr = d3d12_shared_fence_create(device, 0, D3D12_FENCE_FLAG_SHARED, &fence);
if (FAILED(hr))
{
WARN("Failed to create object for imported ID3D12Fence, hr %#x.\n", hr);
*object = NULL;
return hr;
}
import_info.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR;
import_info.pNext = NULL;
import_info.semaphore = fence->timeline_semaphore;
import_info.flags = 0;
import_info.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT;
import_info.handle = handle;
import_info.name = NULL;
vr = VK_CALL(vkImportSemaphoreWin32HandleKHR(device->vk_device, &import_info));
if (vr != VK_SUCCESS)
{
WARN("Failed to open shared ID3D12Fence, vr %d.\n", vr);
ID3D12Fence1_Release(&fence->ID3D12Fence_iface);
*object = NULL;
return E_FAIL;
}
return return_interface(&fence->ID3D12Fence_iface, &IID_ID3D12Fence, riid, object);
}
FIXME("Opening shared handle type %s unsupported\n", debugstr_guid(riid));
return E_NOTIMPL;
#else
FIXME("OpenSharedhandle can only be implemented in native Win32.\n");
return E_NOTIMPL;
#endif
}
static HRESULT STDMETHODCALLTYPE d3d12_device_OpenSharedHandleByName(d3d12_device_iface *iface,
@@ -4695,24 +4470,12 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateFence(d3d12_device_iface *if
UINT64 initial_value, D3D12_FENCE_FLAGS flags, REFIID riid, void **fence)
{
struct d3d12_device *device = impl_from_ID3D12Device(iface);
struct d3d12_shared_fence *shared_object;
struct d3d12_fence *object;
HRESULT hr;
TRACE("iface %p, intial_value %#"PRIx64", flags %#x, riid %s, fence %p.\n",
iface, initial_value, flags, debugstr_guid(riid), fence);
if (flags & D3D12_FENCE_FLAG_SHARED)
{
if (SUCCEEDED(hr = d3d12_shared_fence_create(device, initial_value, flags, &shared_object)))
return return_interface(&shared_object->ID3D12Fence_iface, &IID_ID3D12Fence, riid, fence);
if (hr != E_NOTIMPL)
return hr;
FIXME("Shared fences not supported by Vulkan host, returning regular fence.\n");
}
if (FAILED(hr = d3d12_fence_create(device, initial_value, flags, &object)))
return hr;
@@ -5221,7 +4984,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateStateObject(d3d12_device_ifa
TRACE("iface %p, desc %p, iid %s, state_object %p!\n",
iface, desc, debugstr_guid(iid), state_object);
if (FAILED(hr = d3d12_state_object_create(device, desc, NULL, &state)))
if (FAILED(hr = d3d12_state_object_create(device, desc, &state)))
return hr;
return return_interface(&state->ID3D12StateObject_iface, &IID_ID3D12StateObject, iid, state_object);
@@ -5265,10 +5028,6 @@ static void STDMETHODCALLTYPE d3d12_device_GetRaytracingAccelerationStructurePre
info->ResultDataMaxSizeInBytes = size_info.accelerationStructureSize;
info->ScratchDataSizeInBytes = size_info.buildScratchSize;
info->UpdateScratchDataSizeInBytes = size_info.updateScratchSize;
TRACE("ResultDataMaxSizeInBytes: %"PRIu64".\n", info->ResultDataMaxSizeInBytes);
TRACE("ScratchDatSizeInBytes: %"PRIu64".\n", info->ScratchDataSizeInBytes);
TRACE("UpdateScratchDataSizeInBytes: %"PRIu64".\n", info->UpdateScratchDataSizeInBytes);
}
static D3D12_DRIVER_MATCHING_IDENTIFIER_STATUS STDMETHODCALLTYPE d3d12_device_CheckDriverMatchingIdentifier(d3d12_device_iface *iface,
@@ -5293,23 +5052,13 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_SetBackgroundProcessingMode(d3d12_
return E_NOTIMPL;
}
static HRESULT STDMETHODCALLTYPE d3d12_device_AddToStateObject(d3d12_device_iface *iface,
const D3D12_STATE_OBJECT_DESC *addition,
ID3D12StateObject *parent_state, REFIID riid, void **new_state_object)
static HRESULT STDMETHODCALLTYPE d3d12_device_AddToStateObject(d3d12_device_iface *iface, const D3D12_STATE_OBJECT_DESC *addition,
ID3D12StateObject *state_object, REFIID riid, void **new_state_object)
{
struct d3d12_device *device = impl_from_ID3D12Device(iface);
struct d3d12_state_object *parent;
struct d3d12_state_object *state;
HRESULT hr;
FIXME("iface %p, addition %p, state_object %p, riid %s, new_state_object %p stub!\n",
iface, addition, state_object, debugstr_guid(riid), new_state_object);
TRACE("iface %p, addition %p, state_object %p, riid %s, new_state_object %p stub!\n",
iface, addition, parent_state, debugstr_guid(riid), new_state_object);
parent = impl_from_ID3D12StateObject(parent_state);
if (FAILED(hr = d3d12_state_object_add(device, addition, parent, &state)))
return hr;
return return_interface(&state->ID3D12StateObject_iface, &IID_ID3D12StateObject, riid, new_state_object);
return E_NOTIMPL;
}
static HRESULT STDMETHODCALLTYPE d3d12_device_CreateProtectedResourceSession1(d3d12_device_iface *iface,
@@ -5414,7 +5163,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommittedResource2(d3d12_dev
FIXME("Ignoring protected session %p.\n", protected_session);
if (FAILED(hr = d3d12_resource_create_committed(device, desc, heap_properties,
heap_flags, initial_state, optimized_clear_value, NULL, &object)))
heap_flags, initial_state, optimized_clear_value, &object)))
{
*resource = NULL;
return hr;
@@ -5839,14 +5588,16 @@ static D3D12_RESOURCE_HEAP_TIER d3d12_device_determine_heap_tier(struct d3d12_de
const VkPhysicalDeviceLimits *limits = &device->device_info.properties2.properties.limits;
const struct vkd3d_memory_info *mem_info = &device->memory_info;
const struct vkd3d_memory_info_domain *non_cpu_domain;
const struct vkd3d_memory_info_domain *cpu_domain;
non_cpu_domain = &mem_info->non_cpu_accessible_domain;
cpu_domain = &mem_info->cpu_accessible_domain;
/* Heap Tier 2 requires us to be able to create a heap that supports all resource
* categories at the same time, except RT/DS textures on UPLOAD/READBACK heaps.
* Ignore CPU visible heaps since we only place buffers there. Textures are always promoted to committed. */
// Heap Tier 2 requires us to be able to create a heap that supports all resource
// categories at the same time, except RT/DS textures on UPLOAD/READBACK heaps.
if (limits->bufferImageGranularity > D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT ||
!(non_cpu_domain->buffer_type_mask & non_cpu_domain->sampled_type_mask & non_cpu_domain->rt_ds_type_mask))
!(non_cpu_domain->buffer_type_mask & non_cpu_domain->sampled_type_mask & non_cpu_domain->rt_ds_type_mask) ||
!(cpu_domain->buffer_type_mask & cpu_domain->sampled_type_mask))
return D3D12_RESOURCE_HEAP_TIER_1;
return D3D12_RESOURCE_HEAP_TIER_2;
@@ -6260,27 +6011,6 @@ static void d3d12_device_caps_init_shader_model(struct d3d12_device *device)
}
}
static void d3d12_device_caps_override_application(struct d3d12_device *device)
{
/* Some games rely on certain features to be exposed before they let the primary feature
* be exposed. */
switch (vkd3d_application_feature_override)
{
case VKD3D_APPLICATION_FEATURE_OVERRIDE_PROMOTE_DXR_TO_ULTIMATE:
if (device->d3d12_caps.options5.RaytracingTier >= D3D12_RAYTRACING_TIER_1_0)
{
device->d3d12_caps.options7.MeshShaderTier = D3D12_MESH_SHADER_TIER_1;
device->d3d12_caps.options7.SamplerFeedbackTier = D3D12_SAMPLER_FEEDBACK_TIER_1_0;
INFO("DXR enabled. Application also requires Mesh/Sampler feedback to be exposed (but unused). "
"Enabling these features automatically.\n");
}
break;
default:
break;
}
}
static void d3d12_device_caps_override(struct d3d12_device *device)
{
D3D_FEATURE_LEVEL fl_override = (D3D_FEATURE_LEVEL)0;
@@ -6371,7 +6101,6 @@ static void d3d12_device_caps_init(struct d3d12_device *device)
d3d12_device_caps_init_feature_level(device);
d3d12_device_caps_override(device);
d3d12_device_caps_override_application(device);
}
static void vkd3d_init_shader_extensions(struct d3d12_device *device)
@@ -6425,8 +6154,7 @@ static void vkd3d_init_shader_extensions(struct d3d12_device *device)
}
if (device->d3d12_caps.options4.Native16BitShaderOpsSupported &&
(device->device_info.driver_properties.driverID == VK_DRIVER_ID_MESA_RADV ||
(vkd3d_config_flags & VKD3D_CONFIG_FLAG_FORCE_NATIVE_FP16)))
device->device_info.driver_properties.driverID == VK_DRIVER_ID_MESA_RADV)
{
/* Native FP16 is buggy on NV for now. */
device->vk_info.shader_extensions[device->vk_info.shader_extension_count++] =
@@ -6550,10 +6278,13 @@ static HRESULT d3d12_device_init(struct d3d12_device *device,
if (FAILED(hr = vkd3d_shader_debug_ring_init(&device->debug_ring, device)))
goto out_cleanup_meta_ops;
if (FAILED(hr = vkd3d_descriptor_update_ring_init(&device->descriptor_update_ring, device)))
goto out_cleanup_debug_ring;
#ifdef VKD3D_ENABLE_BREADCRUMBS
if (vkd3d_config_flags & VKD3D_CONFIG_FLAG_BREADCRUMBS)
if (FAILED(hr = vkd3d_breadcrumb_tracer_init(&device->breadcrumb_tracer, device)))
goto out_cleanup_debug_ring;
goto out_cleanup_update_ring;
#endif
if (vkd3d_descriptor_debug_active_qa_checks())
@@ -6588,8 +6319,10 @@ out_cleanup_breadcrumb_tracer:
#ifdef VKD3D_ENABLE_BREADCRUMBS
if (vkd3d_config_flags & VKD3D_CONFIG_FLAG_BREADCRUMBS)
vkd3d_breadcrumb_tracer_cleanup(&device->breadcrumb_tracer, device);
out_cleanup_debug_ring:
out_cleanup_update_ring:
#endif
vkd3d_descriptor_update_ring_cleanup(&device->descriptor_update_ring, device);
out_cleanup_debug_ring:
vkd3d_shader_debug_ring_cleanup(&device->debug_ring, device);
out_cleanup_meta_ops:
vkd3d_meta_ops_cleanup(&device->meta_ops, device);


@@ -190,6 +190,13 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_vkd3d_ext_GetCudaSurfaceObject(ID3
return E_INVALIDARG;
device = d3d12_device_from_ID3D12DeviceExt(iface);
if (uav_handle.ptr & VKD3D_RESOURCE_DESC_DEFER_COPY_MASK)
{
INFO("Flushing copy queue in place due to weird GetCudaSurfaceObject call!\n");
vkd3d_descriptor_update_ring_flush(&device->descriptor_update_ring, device);
}
uav_desc = d3d12_desc_decode_va(uav_handle.ptr);
imageViewHandleInfo.imageView = uav_desc.view->info.view->vk_image_view;


@@ -327,25 +327,23 @@ static HRESULT vkd3d_import_host_memory(struct d3d12_device *device, void *host_
void *pNext, struct vkd3d_device_memory_allocation *allocation)
{
VkImportMemoryHostPointerInfoEXT import_info;
HRESULT hr = S_OK;
HRESULT hr;
import_info.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT;
import_info.pNext = pNext;
import_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
import_info.pHostPointer = host_address;
if ((vkd3d_config_flags & VKD3D_CONFIG_FLAG_USE_HOST_IMPORT_FALLBACK) ||
FAILED(hr = vkd3d_try_allocate_device_memory(device, size,
if (FAILED(hr = vkd3d_try_allocate_device_memory(device, size,
type_flags, type_mask, &import_info, allocation)))
{
if (FAILED(hr))
WARN("Failed to import host memory, hr %#x.\n", hr);
WARN("Failed to import host memory, hr %#x.\n", hr);
/* If we failed, fall back to a host-visible allocation. Generally
* the app will access the memory through the main host pointer,
* so it's fine. */
hr = vkd3d_try_allocate_device_memory(device, size,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
type_mask, pNext, allocation);
type_mask, &import_info, allocation);
}
return hr;
@@ -1157,7 +1155,6 @@ static HRESULT vkd3d_memory_allocator_flush_clears_locked(struct vkd3d_memory_al
for (i = 0; i < queue_family->queue_count; i++)
{
vkd3d_queue_add_wait(queue_family->queues[i],
NULL,
clear_queue->vk_semaphore,
clear_queue->next_signal_value);
}
@@ -1480,7 +1477,6 @@ static bool vkd3d_heap_allocation_accept_deferred_resource_placements(struct d3d
HRESULT vkd3d_allocate_heap_memory(struct d3d12_device *device, struct vkd3d_memory_allocator *allocator,
const struct vkd3d_allocate_heap_memory_info *info, struct vkd3d_memory_allocation *allocation)
{
struct vkd3d_allocate_heap_memory_info heap_info;
struct vkd3d_allocate_memory_info alloc_info;
HRESULT hr;
@@ -1496,27 +1492,6 @@ HRESULT vkd3d_allocate_heap_memory(struct d3d12_device *device, struct vkd3d_mem
if (!(info->heap_desc.Flags & D3D12_HEAP_FLAG_DENY_BUFFERS))
alloc_info.flags |= VKD3D_ALLOCATION_FLAG_GLOBAL_BUFFER;
if (is_cpu_accessible_heap(&info->heap_desc.Properties))
{
if (info->heap_desc.Flags & D3D12_HEAP_FLAG_DENY_BUFFERS)
{
/* If the heap was only designed to handle images, the heap is useless,
* and we can force everything to go through committed path. */
memset(allocation, 0, sizeof(*allocation));
return S_OK;
}
else
{
/* CPU visible textures are never placed on a heap directly,
* since LINEAR images have alignment / size requirements
* that are vastly different from OPTIMAL ones.
* We can place buffers however. */
heap_info = *info;
info = &heap_info;
heap_info.heap_desc.Flags |= D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
}
}
hr = vkd3d_allocate_memory(device, allocator, &alloc_info, allocation);
if (hr == E_OUTOFMEMORY && vkd3d_heap_allocation_accept_deferred_resource_placements(device,
&info->heap_desc.Properties, info->heap_desc.Flags))


@@ -28,7 +28,6 @@ vkd3d_shaders =[
'shaders/vs_swapchain_fullscreen.vert',
'shaders/fs_swapchain_fullscreen.frag',
'shaders/cs_execute_indirect_patch.comp',
'shaders/cs_execute_indirect_patch_debug_ring.comp',
]
vkd3d_src = [
@@ -67,10 +66,6 @@ if enable_breadcrumbs
vkd3d_src += ['breadcrumbs.c']
endif
if vkd3d_platform == 'windows'
vkd3d_src += ['shared_metadata.c']
endif
if not enable_d3d12
vkd3d_lib = shared_library('vkd3d-proton', vkd3d_src, glsl_generator.process(vkd3d_shaders), vkd3d_build, vkd3d_version,
dependencies : [ vkd3d_common_dep, vkd3d_shader_dep ] + vkd3d_extra_libs,


@@ -1244,24 +1244,14 @@ HRESULT vkd3d_execute_indirect_ops_init(struct vkd3d_execute_indirect_ops *meta_
return S_OK;
}
struct vkd3d_meta_execute_indirect_spec_constant_data
{
struct vkd3d_shader_debug_ring_spec_constants constants;
uint32_t workgroup_size_x;
};
HRESULT vkd3d_meta_get_execute_indirect_pipeline(struct vkd3d_meta_ops *meta_ops,
uint32_t patch_command_count, struct vkd3d_execute_indirect_info *info)
{
struct vkd3d_meta_execute_indirect_spec_constant_data execute_indirect_spec_constants;
VkSpecializationMapEntry map_entry[VKD3D_SHADER_DEBUG_RING_SPEC_INFO_MAP_ENTRIES + 1];
struct vkd3d_execute_indirect_ops *meta_indirect_ops = &meta_ops->execute_indirect;
struct vkd3d_shader_debug_ring_spec_info debug_ring_info;
VkSpecializationMapEntry map_entry;
VkSpecializationInfo spec;
HRESULT hr = S_OK;
VkResult vr;
bool debug;
size_t i;
int rc;
@@ -1281,41 +1271,14 @@ HRESULT vkd3d_meta_get_execute_indirect_pipeline(struct vkd3d_meta_ops *meta_ops
}
}
debug = meta_ops->device->debug_ring.active;
map_entry.constantID = 0;
map_entry.offset = 0;
map_entry.size = sizeof(patch_command_count);
/* If we have a debug ring, we can dump indirect command buffer data to the ring as well.
* Vital for debugging broken execute indirect data with templates. */
if (debug)
{
vkd3d_shader_debug_ring_init_spec_constant(meta_ops->device, &debug_ring_info,
0 /* Reserve this hash for internal debug streams. */);
memset(&execute_indirect_spec_constants, 0, sizeof(execute_indirect_spec_constants));
execute_indirect_spec_constants.constants = debug_ring_info.constants;
execute_indirect_spec_constants.workgroup_size_x = patch_command_count;
memcpy(map_entry, debug_ring_info.map_entries, sizeof(debug_ring_info.map_entries));
map_entry[VKD3D_SHADER_DEBUG_RING_SPEC_INFO_MAP_ENTRIES].constantID = 4;
map_entry[VKD3D_SHADER_DEBUG_RING_SPEC_INFO_MAP_ENTRIES].offset =
offsetof(struct vkd3d_meta_execute_indirect_spec_constant_data, workgroup_size_x);
map_entry[VKD3D_SHADER_DEBUG_RING_SPEC_INFO_MAP_ENTRIES].size = sizeof(patch_command_count);
spec.pMapEntries = map_entry;
spec.pData = &execute_indirect_spec_constants;
spec.mapEntryCount = ARRAY_SIZE(map_entry);
spec.dataSize = sizeof(execute_indirect_spec_constants);
}
else
{
map_entry[0].constantID = 0;
map_entry[0].offset = 0;
map_entry[0].size = sizeof(patch_command_count);
spec.pMapEntries = map_entry;
spec.pData = &patch_command_count;
spec.mapEntryCount = 1;
spec.dataSize = sizeof(patch_command_count);
}
spec.pMapEntries = &map_entry;
spec.pData = &patch_command_count;
spec.mapEntryCount = 1;
spec.dataSize = sizeof(patch_command_count);
vkd3d_array_reserve((void**)&meta_indirect_ops->pipelines, &meta_indirect_ops->pipelines_size,
meta_indirect_ops->pipelines_count + 1, sizeof(*meta_indirect_ops->pipelines));
@@ -1323,8 +1286,7 @@ HRESULT vkd3d_meta_get_execute_indirect_pipeline(struct vkd3d_meta_ops *meta_ops
meta_indirect_ops->pipelines[meta_indirect_ops->pipelines_count].workgroup_size_x = patch_command_count;
vr = vkd3d_meta_create_compute_pipeline(meta_ops->device,
debug ? sizeof(cs_execute_indirect_patch_debug_ring) : sizeof(cs_execute_indirect_patch),
debug ? cs_execute_indirect_patch_debug_ring : cs_execute_indirect_patch,
sizeof(cs_execute_indirect_patch), cs_execute_indirect_patch,
meta_indirect_ops->vk_pipeline_layout, &spec,
&meta_indirect_ops->pipelines[meta_indirect_ops->pipelines_count].vk_pipeline);

File diff suppressed because it is too large.


@@ -488,7 +488,6 @@ static bool vkd3d_format_check_usage_support(struct d3d12_device *device, VkForm
struct vkd3d_image_create_info
{
struct vkd3d_format_compatibility_list format_compat_list;
VkExternalMemoryImageCreateInfo external_info;
VkImageFormatListCreateInfoKHR format_list;
VkImageCreateInfo image_info;
};
@@ -499,7 +498,6 @@ static HRESULT vkd3d_get_image_create_info(struct d3d12_device *device,
struct vkd3d_image_create_info *create_info)
{
struct vkd3d_format_compatibility_list *compat_list = &create_info->format_compat_list;
VkExternalMemoryImageCreateInfo *external_info = &create_info->external_info;
VkImageFormatListCreateInfoKHR *format_list = &create_info->format_list;
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
VkImageCreateInfo *image_info = &create_info->image_info;
@@ -524,22 +522,12 @@
image_info->sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_info->pNext = NULL;
image_info->flags = 0;
if (resource && (resource->heap_flags & D3D12_HEAP_FLAG_SHARED))
{
external_info->sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
external_info->pNext = NULL;
external_info->handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT;
image_info->pNext = external_info;
}
if (!(desc->Flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL))
{
if (vkd3d_get_format_compatibility_list(device, desc, compat_list))
{
format_list->sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR;
format_list->pNext = image_info->pNext;
format_list->pNext = NULL;
format_list->viewFormatCount = compat_list->format_count;
format_list->pViewFormats = compat_list->vk_formats;
@@ -2713,7 +2701,7 @@ static HRESULT d3d12_resource_create(struct d3d12_device *device, uint32_t flags
HRESULT d3d12_resource_create_committed(struct d3d12_device *device, const D3D12_RESOURCE_DESC1 *desc,
const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags, D3D12_RESOURCE_STATES initial_state,
const D3D12_CLEAR_VALUE *optimized_clear_value, HANDLE shared_handle, struct d3d12_resource **resource)
const D3D12_CLEAR_VALUE *optimized_clear_value, struct d3d12_resource **resource)
{
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
struct d3d12_resource *object;
@@ -2734,11 +2722,6 @@ HRESULT d3d12_resource_create_committed(struct d3d12_device *device, const D3D12
bool use_dedicated_allocation;
VkResult vr;
#ifdef _WIN32
VkImportMemoryWin32HandleInfoKHR import_info;
VkExportMemoryAllocateInfo export_info;
#endif
if (FAILED(hr = d3d12_resource_create_vk_resource(object, device)))
goto fail;
@@ -2771,36 +2754,10 @@ HRESULT d3d12_resource_create_committed(struct d3d12_device *device, const D3D12
else
allocate_info.heap_flags |= D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES;
if (heap_flags & D3D12_HEAP_FLAG_SHARED)
{
#ifdef _WIN32
use_dedicated_allocation = true;
if (shared_handle && shared_handle != INVALID_HANDLE_VALUE)
{
import_info.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR;
import_info.pNext = allocate_info.pNext;
import_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT;
import_info.handle = shared_handle;
import_info.name = NULL;
allocate_info.pNext = &import_info;
}
else
{
export_info.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
export_info.pNext = allocate_info.pNext;
export_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT;
allocate_info.pNext = &export_info;
}
#else
FIXME("D3D12_HEAP_FLAG_SHARED can only be implemented in native Win32.\n");
#endif
}
if (use_dedicated_allocation)
{
dedicated_info.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
dedicated_info.pNext = allocate_info.pNext;
dedicated_info.pNext = NULL;
dedicated_info.image = object->res.vk_image;
dedicated_info.buffer = VK_NULL_HANDLE;
allocate_info.pNext = &dedicated_info;
@@ -2906,30 +2863,20 @@ HRESULT d3d12_resource_create_placed(struct d3d12_device *device, const D3D12_RE
VkMemoryRequirements memory_requirements;
VkBindImageMemoryInfo bind_info;
struct d3d12_resource *object;
bool force_committed;
VkResult vr;
HRESULT hr;
if (FAILED(hr = d3d12_resource_validate_heap(desc, heap)))
return hr;
/* Placed linear textures are ... problematic
* since we have no way of signalling that they have different alignment and size requirements
* than optimal textures. GetResourceAllocationInfo() does not take heap property information
* and assumes that we are not modifying the tiling mode. */
force_committed = desc->Dimension != D3D12_RESOURCE_DIMENSION_BUFFER &&
is_cpu_accessible_heap(&heap->desc.Properties);
if (force_committed || heap->allocation.device_allocation.vk_memory == VK_NULL_HANDLE)
if (heap->allocation.device_allocation.vk_memory == VK_NULL_HANDLE)
{
if (!force_committed)
WARN("Placing resource on heap with no memory backing it. Falling back to committed resource.\n");
WARN("Placing resource on heap with no memory backing it. Falling back to committed resource.\n");
if (FAILED(hr = d3d12_resource_create_committed(device, desc, &heap->desc.Properties,
heap->desc.Flags & ~(D3D12_HEAP_FLAG_DENY_BUFFERS |
D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES |
D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES),
initial_state, optimized_clear_value, NULL, resource)))
initial_state, optimized_clear_value, resource)))
{
ERR("Failed to create fallback committed resource.\n");
}
@@ -5348,6 +5295,22 @@ static ULONG STDMETHODCALLTYPE d3d12_descriptor_heap_AddRef(ID3D12DescriptorHeap
return refcount;
}
void d3d12_descriptor_heap_inc_ref(struct d3d12_descriptor_heap *heap)
{
InterlockedIncrement(&heap->internal_refcount);
}
void d3d12_descriptor_heap_dec_ref(struct d3d12_descriptor_heap *heap)
{
ULONG refcount = InterlockedDecrement(&heap->internal_refcount);
if (!refcount)
{
d3d12_descriptor_heap_cleanup(heap);
vkd3d_free_aligned(heap);
}
}
static ULONG STDMETHODCALLTYPE d3d12_descriptor_heap_Release(ID3D12DescriptorHeap *iface)
{
struct d3d12_descriptor_heap *heap = impl_from_ID3D12DescriptorHeap(iface);
@@ -5358,11 +5321,12 @@ static ULONG STDMETHODCALLTYPE d3d12_descriptor_heap_Release(ID3D12DescriptorHea
if (!refcount)
{
struct d3d12_device *device = heap->device;
d3d12_descriptor_heap_cleanup(heap);
/* There might be pending descriptor copies from our heap, so keep it alive
* until we have observed that all possible copies have completed using the current
* write_count timestamp. */
vkd3d_descriptor_update_ring_mark_heap_destruction(&device->descriptor_update_ring, heap);
vkd3d_private_store_destroy(&heap->private_store);
vkd3d_free_aligned(heap);
d3d12_descriptor_heap_dec_ref(heap);
d3d12_device_release(device);
}
@@ -5906,6 +5870,7 @@ static HRESULT d3d12_descriptor_heap_init(struct d3d12_descriptor_heap *descript
memset(descriptor_heap, 0, sizeof(*descriptor_heap));
descriptor_heap->ID3D12DescriptorHeap_iface.lpVtbl = &d3d12_descriptor_heap_vtbl;
descriptor_heap->refcount = 1;
descriptor_heap->internal_refcount = 1;
descriptor_heap->device = device;
descriptor_heap->desc = *desc;
@@ -6058,6 +6023,18 @@ HRESULT d3d12_descriptor_heap_create(struct d3d12_device *device,
{
/* See comments above on how this is supposed to work */
object->cpu_va.ptr = (SIZE_T)object + num_descriptor_bits;
/* FIXME: This is gross. We need to repurpose some of the lower order bits to make this robust. */
if (object->cpu_va.ptr & VKD3D_RESOURCE_DESC_DEFER_COPY_MASK)
{
FIXME("High bit in CPU va is set ...\n");
vkd3d_free_aligned(object);
return E_OUTOFMEMORY;
}
if (desc->Type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV &&
(desc->Flags & D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE))
object->cpu_va.ptr |= VKD3D_RESOURCE_DESC_DEFER_COPY_MASK;
}
else
{
@@ -6097,6 +6074,172 @@ void d3d12_descriptor_heap_cleanup(struct d3d12_descriptor_heap *descriptor_heap
vkd3d_descriptor_debug_unregister_heap(descriptor_heap->cookie);
}
HRESULT vkd3d_descriptor_update_ring_init(struct vkd3d_descriptor_update_ring *ring,
struct d3d12_device *device)
{
D3D12_DESCRIPTOR_HEAP_DESC desc;
HRESULT hr;
desc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
desc.NumDescriptors = VKD3D_DESCRIPTOR_UPDATE_RING_SIZE;
desc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
desc.NodeMask = 0;
if (FAILED(hr = d3d12_descriptor_heap_create(device, &desc, &ring->staging)))
return hr;
/* Hold private reference. */
d3d12_descriptor_heap_inc_ref(ring->staging);
d3d12_descriptor_heap_Release(&ring->staging->ID3D12DescriptorHeap_iface);
ring->staging_base = ring->staging->cpu_va.ptr;
ring->cbv_srv_uav_copies = vkd3d_calloc(VKD3D_DESCRIPTOR_UPDATE_RING_SIZE, sizeof(*ring->cbv_srv_uav_copies));
pthread_mutex_init(&ring->submission_lock, NULL);
return S_OK;
}
void vkd3d_descriptor_update_ring_cleanup(struct vkd3d_descriptor_update_ring *ring,
struct d3d12_device *device)
{
size_t i;
d3d12_descriptor_heap_dec_ref(ring->staging);
pthread_mutex_destroy(&ring->submission_lock);
vkd3d_free(ring->cbv_srv_uav_copies);
for (i = 0; i < ring->deferred_release_count; i++)
d3d12_descriptor_heap_dec_ref(ring->deferred_releases[i].heap);
vkd3d_free(ring->deferred_releases);
}
vkd3d_cpu_descriptor_va_t vkd3d_descriptor_update_ring_allocate_scratch(struct vkd3d_descriptor_update_ring *ring)
{
/* FIXME: This can in theory overflow before we've read all entries. If we start to fill the buffer,
* we can do an emergency stall, but it should basically never happen unless the application
* is specifically trying to break us. We can compare against the read_count to detect this. */
uint32_t offset = vkd3d_atomic_uint32_increment(&ring->staging_write_count, vkd3d_memory_order_relaxed) - 1;
offset &= VKD3D_DESCRIPTOR_UPDATE_RING_MASK;
return ring->staging_base + offset * VKD3D_RESOURCE_DESC_INCREMENT;
}
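As a side note, the ring math above can be modeled in isolation. A minimal C11 sketch (illustrative only; the names, sizes and atomics here are stand-ins, not vkd3d API): a monotonically increasing atomic counter masked with a power-of-two ring size yields a wrapping slot index without any locking.

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 256u                /* must be a power of two */
#define RING_MASK (RING_SIZE - 1u)
#define DESC_INCREMENT 32u            /* bytes per descriptor slot */

static _Atomic uint32_t scratch_write_count;

static uintptr_t ring_allocate_scratch(uintptr_t staging_base)
{
    /* fetch_add returns the pre-increment value, i.e. this caller's unique slot. */
    uint32_t slot = atomic_fetch_add_explicit(&scratch_write_count, 1u, memory_order_relaxed);
    return staging_base + (uintptr_t)(slot & RING_MASK) * DESC_INCREMENT;
}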
void vkd3d_descriptor_update_ring_push_copy(struct vkd3d_descriptor_update_ring *ring,
struct d3d12_device *device,
vkd3d_cpu_descriptor_va_t src_, vkd3d_cpu_descriptor_va_t dst_)
{
/* FIXME: Have to handle the case where we're overflowing the buffers. */
struct vkd3d_descriptor_update_ring_copy *copy;
void *src = (void*)src_;
void *dst = (void*)dst_;
uint32_t write_count;
uint32_t read_count;
uint32_t offset;
/* Detect internal overflow. When we've filled half the buffer it's getting spicy,
* so perform an emergency flush.
* This should only happen in code that tries hard to break us. */
write_count = vkd3d_atomic_uint32_load_explicit(&ring->write_count, vkd3d_memory_order_relaxed);
read_count = vkd3d_atomic_uint32_load_explicit(&ring->read_count, vkd3d_memory_order_relaxed);
if ((write_count - read_count) > VKD3D_DESCRIPTOR_UPDATE_RING_SIZE / 2)
{
INFO("Emergency descriptor update ring flush in render threads!\n");
vkd3d_descriptor_update_ring_flush(ring, device);
}
write_count = vkd3d_atomic_uint32_increment(&ring->write_count, vkd3d_memory_order_relaxed) - 1;
offset = write_count & VKD3D_DESCRIPTOR_UPDATE_RING_MASK;
copy = &ring->cbv_srv_uav_copies[offset];
/* There is a race here where submission threads may observe that the write count has increased, but the application
* has not completely written the descriptor copy yet.
* Submission will stop processing in this case; on the next iteration, the read count resumes at the first failed descriptor.
* All descriptor updates which happened before the ExecuteCommandLists
* are guaranteed to be completely written, so the only edge case occurs if ExecuteCommandLists is called
* concurrently with descriptor updates, but it's not guaranteed that ExecuteCommandLists would observe those descriptors
* in the first place. The only real memory ordering we need to consider is release semantics on the dst write.
* The submission thread will read dst first with acquire order, then src. */
vkd3d_atomic_ptr_store_explicit(&copy->src, src, vkd3d_memory_order_relaxed);
vkd3d_atomic_ptr_store_explicit(&copy->dst, dst, vkd3d_memory_order_release);
}
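The ordering contract described in the comment above can be demonstrated with a standalone C11 model (illustrative only; vkd3d uses its own atomic wrappers, and the struct here is a stand-in): the producer publishes src with a relaxed store and dst with a release store, so a consumer that acquire-loads a non-NULL dst is guaranteed to also observe the matching src.

#include <stdatomic.h>
#include <stdbool.h>

struct copy_slot
{
    _Atomic(void *) src;
    _Atomic(void *) dst;
};

/* Producer (render thread): dst is stored last with release semantics,
 * which makes the earlier src store visible together with it. */
static void slot_publish(struct copy_slot *slot, void *src, void *dst)
{
    atomic_store_explicit(&slot->src, src, memory_order_relaxed);
    atomic_store_explicit(&slot->dst, dst, memory_order_release);
}

/* Consumer (submission thread): an acquire load of a non-NULL dst pairs with
 * the release store above, so the relaxed load of src cannot be stale. */
static bool slot_consume(struct copy_slot *slot, void **src, void **dst)
{
    *dst = atomic_load_explicit(&slot->dst, memory_order_acquire);
    if (!*dst)
        return false; /* not fully written yet; the next flush picks it up */
    *src = atomic_load_explicit(&slot->src, memory_order_relaxed);
    return true;
}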
void vkd3d_descriptor_update_ring_mark_heap_destruction(struct vkd3d_descriptor_update_ring *ring,
struct d3d12_descriptor_heap *heap)
{
uint32_t write_count;
if (heap->desc.Type != D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV)
return;
write_count = vkd3d_atomic_uint32_load_explicit(&ring->write_count, vkd3d_memory_order_relaxed);
d3d12_descriptor_heap_inc_ref(heap);
pthread_mutex_lock(&ring->submission_lock);
vkd3d_array_reserve((void**)&ring->deferred_releases, &ring->deferred_release_size,
ring->deferred_release_count + 1, sizeof(*ring->deferred_releases));
ring->deferred_releases[ring->deferred_release_count].heap = heap;
ring->deferred_releases[ring->deferred_release_count].write_count = write_count;
ring->deferred_release_count++;
pthread_mutex_unlock(&ring->submission_lock);
}
void vkd3d_descriptor_update_ring_flush(struct vkd3d_descriptor_update_ring *ring,
struct d3d12_device *device)
{
/* This is called from submission threads and performs the main "meat" of the descriptor management. */
uint32_t write_count = vkd3d_atomic_uint32_load_explicit(&ring->write_count, vkd3d_memory_order_acquire);
struct vkd3d_descriptor_update_ring_copy *copy;
vkd3d_cpu_descriptor_va_t src, dst;
uint32_t read_count;
size_t i, j;
pthread_mutex_lock(&ring->submission_lock);
read_count = ring->read_count;
/* Wrap arithmetic, so != check is appropriate. */
while (read_count != write_count)
{
copy = &ring->cbv_srv_uav_copies[read_count & VKD3D_DESCRIPTOR_UPDATE_RING_MASK];
dst = (vkd3d_cpu_descriptor_va_t)vkd3d_atomic_ptr_load_explicit(&copy->dst, vkd3d_memory_order_acquire);
if (dst)
{
src = (vkd3d_cpu_descriptor_va_t) vkd3d_atomic_ptr_load_explicit(&copy->src, vkd3d_memory_order_relaxed);
d3d12_desc_copy_single(dst, src, device);
/* Consume the descriptor. */
vkd3d_atomic_ptr_store_explicit(&copy->dst, NULL, vkd3d_memory_order_release);
vkd3d_atomic_ptr_store_explicit(&copy->src, NULL, vkd3d_memory_order_release);
}
else
{
/* Race condition, but it's okay. We'll stop here and pick it up later.
* The write to dst will come in finite time. */
break;
}
read_count++;
}
vkd3d_atomic_uint32_store_explicit(&ring->read_count, read_count, vkd3d_memory_order_release);
/* If we're done copying descriptors from a heap, we can now release it safely. */
for (i = 0, j = 0; i < ring->deferred_release_count; i++)
{
/* With wrap arithmetic, test that read_count has passed the recorded write_count. In the case where the read counter hasn't caught up,
* the wrapped difference will be close to UINT_MAX instead. */
if ((read_count - ring->deferred_releases[i].write_count) <= VKD3D_DESCRIPTOR_UPDATE_RING_SIZE)
d3d12_descriptor_heap_dec_ref(ring->deferred_releases[i].heap);
else
ring->deferred_releases[j++] = ring->deferred_releases[i];
}
ring->deferred_release_count = j;
pthread_mutex_unlock(&ring->submission_lock);
}
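The deferred-release test above leans on unsigned wraparound; a worked example with concrete (illustrative) values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint32_t ring_size  = 256u * 1024u;  /* VKD3D_DESCRIPTOR_UPDATE_RING_SIZE */
    const uint32_t retired_at = 0xFFFFFFF0u;   /* write_count recorded at heap destruction */

    /* Reader still behind the retire point: the subtraction wraps to a value
     * near UINT32_MAX, far above ring_size, so the heap is kept alive. */
    uint32_t read_count = 0xFFFFFFE0u;
    assert(read_count - retired_at > ring_size);   /* 0xFFFFFFF0: keep */

    /* Reader has passed the retire point, even across the 32-bit wrap:
     * the difference is small, so the heap can finally be released. */
    read_count = 0x00000010u;
    assert(read_count - retired_at <= ring_size);  /* 0x20: release */
    return 0;
}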
static void d3d12_query_heap_set_name(struct d3d12_query_heap *heap, const char *name)
{
if (heap->vk_query_pool)


@@ -11,6 +12,12 @@ struct Command
uint dst_offset;
};
const int COMMAND_TYPE_COPY_U32 = 0;
const int COMMAND_TYPE_COPY_INDEX_TYPE = 1;
const int DXGI_FORMAT_R32_UINT = 0x2a;
const int VK_INDEX_TYPE_UINT16 = 0;
const int VK_INDEX_TYPE_UINT32 = 1;
layout(buffer_reference, std430, buffer_reference_align = 4) readonly buffer Commands
{
Command commands[];
@@ -48,11 +54,10 @@ void main()
Command cmd = commands_va.commands[gl_LocalInvocationIndex];
uint draw_id = gl_WorkGroupID.x;
uint max_draws = gl_NumWorkGroups.x;
uint max_draws = ~0u;
if (any(notEqual(indirect_count_va, uvec2(0))))
{
max_draws = min(max_draws, IndirectCount(indirect_count_va).count);
max_draws = IndirectCount(indirect_count_va).count;
if (gl_WorkGroupID.x == 0u)
dst_indirect_count_va.count = max_draws;
}
@@ -61,7 +66,11 @@
{
uint src_offset = src_stride * draw_id + cmd.src_offset;
uint dst_offset = dst_stride * draw_id + cmd.dst_offset;
uint src_value = src_buffer_va.values[src_offset];
if (cmd.type == COMMAND_TYPE_COPY_INDEX_TYPE)
src_value = src_value == DXGI_FORMAT_R32_UINT ? VK_INDEX_TYPE_UINT32 : VK_INDEX_TYPE_UINT16;
dst_buffer_va.values[dst_offset] = src_value;
}
}


@@ -1,83 +0,0 @@
#version 450
#extension GL_EXT_buffer_reference : require
#extension GL_EXT_buffer_reference_uvec2 : require
#extension GL_GOOGLE_include_directive : require
#include "../../../include/shader-debug/debug_channel.h"
layout(local_size_x_id = 4) in;
struct Command
{
uint type;
uint src_offset;
uint dst_offset;
};
layout(buffer_reference, std430, buffer_reference_align = 4) readonly buffer Commands
{
Command commands[];
};
layout(buffer_reference, std430, buffer_reference_align = 4) readonly buffer SrcBuffer {
uint values[];
};
layout(buffer_reference, std430, buffer_reference_align = 4) writeonly buffer DstBuffer {
uint values[];
};
layout(buffer_reference, std430, buffer_reference_align = 4) readonly buffer IndirectCount {
uint count;
};
layout(buffer_reference, std430, buffer_reference_align = 4) writeonly buffer IndirectCountWrite {
uint count;
};
layout(push_constant) uniform Registers
{
Commands commands_va;
SrcBuffer src_buffer_va;
DstBuffer dst_buffer_va;
uvec2 indirect_count_va;
IndirectCountWrite dst_indirect_count_va;
uint src_stride;
uint dst_stride;
// Debug metadata here
uint debug_tag;
uint implicit_instance;
};
void main()
{
if (debug_tag != 0u)
DEBUG_CHANNEL_INIT_IMPLICIT_INSTANCE(uvec3(debug_tag, gl_WorkGroupID.x, gl_LocalInvocationIndex), implicit_instance);
Command cmd = commands_va.commands[gl_LocalInvocationIndex];
uint draw_id = gl_WorkGroupID.x;
uint max_draws = gl_NumWorkGroups.x;
if (any(notEqual(indirect_count_va, uvec2(0))))
{
max_draws = min(max_draws, IndirectCount(indirect_count_va).count);
if (gl_WorkGroupID.x == 0u)
dst_indirect_count_va.count = max_draws;
}
if (debug_tag != 0u && gl_WorkGroupID.x == 0)
DEBUG_CHANNEL_MSG_UNIFORM(int(max_draws), int(gl_NumWorkGroups.x));
if (draw_id < max_draws)
{
uint src_offset = src_stride * draw_id + cmd.src_offset;
uint dst_offset = dst_stride * draw_id + cmd.dst_offset;
uint src_value = src_buffer_va.values[src_offset];
if (debug_tag != 0u)
DEBUG_CHANNEL_MSG(cmd.type, dst_offset, src_offset, src_value);
dst_buffer_va.values[dst_offset] = src_value;
}
}


@@ -1,68 +0,0 @@
/*
* Copyright 2021 Derek Lesho for Codeweavers
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
#define VKD3D_DBG_CHANNEL VKD3D_DBG_CHANNEL_API
#include "vkd3d_private.h"
#include "winioctl.h"
#define IOCTL_SHARED_GPU_RESOURCE_SET_METADATA CTL_CODE(FILE_DEVICE_VIDEO, 4, METHOD_BUFFERED, FILE_WRITE_ACCESS)
#define IOCTL_SHARED_GPU_RESOURCE_GET_METADATA CTL_CODE(FILE_DEVICE_VIDEO, 5, METHOD_BUFFERED, FILE_READ_ACCESS)
#define IOCTL_SHARED_GPU_RESOURCE_OPEN CTL_CODE(FILE_DEVICE_VIDEO, 1, METHOD_BUFFERED, FILE_WRITE_ACCESS)
bool vkd3d_set_shared_metadata(HANDLE handle, void *buf, uint32_t buf_size)
{
DWORD ret_size;
return DeviceIoControl(handle, IOCTL_SHARED_GPU_RESOURCE_SET_METADATA, buf, buf_size, NULL, 0, &ret_size, NULL);
}
bool vkd3d_get_shared_metadata(HANDLE handle, void *buf, uint32_t buf_size, uint32_t *metadata_size)
{
DWORD ret_size;
bool ret = DeviceIoControl(handle, IOCTL_SHARED_GPU_RESOURCE_GET_METADATA, NULL, 0, buf, buf_size, &ret_size, NULL);
if (metadata_size)
*metadata_size = ret_size;
return ret;
}
HANDLE vkd3d_open_kmt_handle(HANDLE kmt_handle)
{
struct
{
unsigned int kmt_handle;
/* the following parameter represents a larger sized string for a dynamically allocated struct for use when opening an object by name */
WCHAR name[1];
} shared_resource_open;
HANDLE nt_handle = CreateFileA("\\\\.\\SharedGpuResource", GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
if (nt_handle == INVALID_HANDLE_VALUE)
return nt_handle;
shared_resource_open.kmt_handle = (ULONG_PTR)kmt_handle;
shared_resource_open.name[0] = 0;
if (!DeviceIoControl(nt_handle, IOCTL_SHARED_GPU_RESOURCE_OPEN, &shared_resource_open, sizeof(shared_resource_open), NULL, 0, NULL, NULL))
{
CloseHandle(nt_handle);
return INVALID_HANDLE_VALUE;
}
return nt_handle;
}


@@ -1473,36 +1473,8 @@ fail:
return hr;
}
HRESULT d3d12_root_signature_create_empty(struct d3d12_device *device,
struct d3d12_root_signature **root_signature)
{
struct d3d12_root_signature *object;
D3D12_ROOT_SIGNATURE_DESC1 desc;
HRESULT hr;
if (!(object = vkd3d_malloc(sizeof(*object))))
return E_OUTOFMEMORY;
memset(&desc, 0, sizeof(desc));
hr = d3d12_root_signature_init(object, device, &desc);
/* For pipeline libraries, (and later DXR to some degree), we need a way to
* compare root signature objects. */
object->compatibility_hash = 0;
if (FAILED(hr))
{
vkd3d_free(object);
return hr;
}
*root_signature = object;
return S_OK;
}
static HRESULT d3d12_root_signature_create_from_blob(struct d3d12_device *device,
const void *bytecode, size_t bytecode_length, bool raw_payload,
struct d3d12_root_signature **root_signature)
HRESULT d3d12_root_signature_create(struct d3d12_device *device,
const void *bytecode, size_t bytecode_length, struct d3d12_root_signature **root_signature)
{
const struct vkd3d_shader_code dxbc = {bytecode, bytecode_length};
union
@@ -1510,26 +1482,14 @@ static HRESULT d3d12_root_signature_create_from_blob(struct d3d12_device *device
D3D12_VERSIONED_ROOT_SIGNATURE_DESC d3d12;
struct vkd3d_versioned_root_signature_desc vkd3d;
} root_signature_desc;
vkd3d_shader_hash_t compatibility_hash;
struct d3d12_root_signature *object;
HRESULT hr;
int ret;
if (raw_payload)
if ((ret = vkd3d_parse_root_signature_v_1_1(&dxbc, &root_signature_desc.vkd3d)) < 0)
{
if ((ret = vkd3d_parse_root_signature_v_1_1_from_raw_payload(&dxbc, &root_signature_desc.vkd3d, &compatibility_hash)))
{
WARN("Failed to parse root signature, vkd3d result %d.\n", ret);
return hresult_from_vkd3d_result(ret);
}
}
else
{
if ((ret = vkd3d_parse_root_signature_v_1_1(&dxbc, &root_signature_desc.vkd3d, &compatibility_hash)) < 0)
{
WARN("Failed to parse root signature, vkd3d result %d.\n", ret);
return hresult_from_vkd3d_result(ret);
}
WARN("Failed to parse root signature, vkd3d result %d.\n", ret);
return hresult_from_vkd3d_result(ret);
}
if (!(object = vkd3d_malloc(sizeof(*object))))
@@ -1542,7 +1502,7 @@ static HRESULT d3d12_root_signature_create_from_blob(struct d3d12_device *device
/* For pipeline libraries, (and later DXR to some degree), we need a way to
* compare root signature objects. */
object->compatibility_hash = compatibility_hash;
object->compatibility_hash = vkd3d_shader_hash(&dxbc);
vkd3d_shader_free_root_signature(&root_signature_desc.vkd3d);
if (FAILED(hr))
@@ -1558,20 +1518,6 @@ static HRESULT d3d12_root_signature_create_from_blob(struct d3d12_device *device
return S_OK;
}
HRESULT d3d12_root_signature_create(struct d3d12_device *device,
const void *bytecode, size_t bytecode_length,
struct d3d12_root_signature **root_signature)
{
return d3d12_root_signature_create_from_blob(device, bytecode, bytecode_length, false, root_signature);
}
HRESULT d3d12_root_signature_create_raw(struct d3d12_device *device,
const void *payload, size_t payload_length,
struct d3d12_root_signature **root_signature)
{
return d3d12_root_signature_create_from_blob(device, payload, payload_length, true, root_signature);
}
unsigned int d3d12_root_signature_get_shader_interface_flags(const struct d3d12_root_signature *root_signature)
{
unsigned int flags = 0;


@@ -959,7 +959,7 @@ static HRESULT d3d12_swapchain_create_user_buffers(struct d3d12_swapchain *swapc
{
if (FAILED(hr = d3d12_resource_create_committed(d3d12_swapchain_device(swapchain),
&resource_desc, &heap_properties, D3D12_HEAP_FLAG_NONE,
D3D12_RESOURCE_STATE_PRESENT, NULL, NULL, &object)))
D3D12_RESOURCE_STATE_PRESENT, NULL, &object)))
{
ERR("Failed to create image for swapchain buffer");
return hr;


@@ -165,28 +165,15 @@ static CONST_VTBL struct ID3D12RootSignatureDeserializerVtbl d3d12_root_signatur
static int vkd3d_parse_root_signature_for_version(const struct vkd3d_shader_code *dxbc,
struct vkd3d_versioned_root_signature_desc *out_desc,
enum vkd3d_root_signature_version target_version,
bool raw_payload,
vkd3d_shader_hash_t *compatibility_hash)
enum vkd3d_root_signature_version target_version)
{
struct vkd3d_versioned_root_signature_desc desc, converted_desc;
int ret;
if (raw_payload)
if ((ret = vkd3d_shader_parse_root_signature(dxbc, &desc)) < 0)
{
if ((ret = vkd3d_shader_parse_root_signature_raw(dxbc->code, dxbc->size, &desc, compatibility_hash)) < 0)
{
WARN("Failed to parse root signature, vkd3d result %d.\n", ret);
return ret;
}
}
else
{
if ((ret = vkd3d_shader_parse_root_signature(dxbc, &desc, compatibility_hash)) < 0)
{
WARN("Failed to parse root signature, vkd3d result %d.\n", ret);
return ret;
}
WARN("Failed to parse root signature, vkd3d result %d.\n", ret);
return ret;
}
if (desc.version == target_version)
@@ -210,27 +197,15 @@ static int vkd3d_parse_root_signature_for_version(const struct vkd3d_shader_code
}
int vkd3d_parse_root_signature_v_1_0(const struct vkd3d_shader_code *dxbc,
struct vkd3d_versioned_root_signature_desc *out_desc,
vkd3d_shader_hash_t *compatibility_hash)
struct vkd3d_versioned_root_signature_desc *out_desc)
{
return vkd3d_parse_root_signature_for_version(dxbc, out_desc, VKD3D_ROOT_SIGNATURE_VERSION_1_0, false,
compatibility_hash);
return vkd3d_parse_root_signature_for_version(dxbc, out_desc, VKD3D_ROOT_SIGNATURE_VERSION_1_0);
}
int vkd3d_parse_root_signature_v_1_1(const struct vkd3d_shader_code *dxbc,
struct vkd3d_versioned_root_signature_desc *out_desc,
vkd3d_shader_hash_t *compatibility_hash)
struct vkd3d_versioned_root_signature_desc *out_desc)
{
return vkd3d_parse_root_signature_for_version(dxbc, out_desc, VKD3D_ROOT_SIGNATURE_VERSION_1_1, false,
compatibility_hash);
}
int vkd3d_parse_root_signature_v_1_1_from_raw_payload(const struct vkd3d_shader_code *dxbc,
struct vkd3d_versioned_root_signature_desc *out_desc,
vkd3d_shader_hash_t *compatibility_hash)
{
return vkd3d_parse_root_signature_for_version(dxbc, out_desc, VKD3D_ROOT_SIGNATURE_VERSION_1_1, true,
compatibility_hash);
return vkd3d_parse_root_signature_for_version(dxbc, out_desc, VKD3D_ROOT_SIGNATURE_VERSION_1_1);
}
static HRESULT d3d12_root_signature_deserializer_init(struct d3d12_root_signature_deserializer *deserializer,
@@ -241,7 +216,7 @@ static HRESULT d3d12_root_signature_deserializer_init(struct d3d12_root_signatur
deserializer->ID3D12RootSignatureDeserializer_iface.lpVtbl = &d3d12_root_signature_deserializer_vtbl;
deserializer->refcount = 1;
if ((ret = vkd3d_parse_root_signature_v_1_0(dxbc, &deserializer->desc.vkd3d, NULL)) < 0)
if ((ret = vkd3d_parse_root_signature_v_1_0(dxbc, &deserializer->desc.vkd3d)) < 0)
return hresult_from_vkd3d_result(ret);
return S_OK;
@@ -419,7 +394,7 @@ static HRESULT d3d12_versioned_root_signature_deserializer_init(struct d3d12_ver
deserializer->ID3D12VersionedRootSignatureDeserializer_iface.lpVtbl = &d3d12_versioned_root_signature_deserializer_vtbl;
deserializer->refcount = 1;
if ((ret = vkd3d_shader_parse_root_signature(dxbc, &deserializer->desc.vkd3d, NULL)) < 0)
if ((ret = vkd3d_shader_parse_root_signature(dxbc, &deserializer->desc.vkd3d)) < 0)
{
WARN("Failed to parse root signature, vkd3d result %d.\n", ret);
return hresult_from_vkd3d_result(ret);


@@ -61,8 +61,6 @@
#define VKD3D_TILE_SIZE 65536
typedef ID3D12Fence1 d3d12_fence_iface;
struct d3d12_command_list;
struct d3d12_device;
struct d3d12_resource;
@@ -137,8 +135,6 @@ struct vkd3d_vulkan_info
bool KHR_maintenance4;
bool KHR_ray_tracing_maintenance1;
bool KHR_fragment_shader_barycentric;
bool KHR_external_memory_win32;
bool KHR_external_semaphore_win32;
/* EXT device extensions */
bool EXT_calibrated_timestamps;
bool EXT_conditional_rendering;
@@ -230,12 +226,8 @@ HRESULT vkd3d_join_thread(struct vkd3d_instance *instance, union vkd3d_thread_ha
struct vkd3d_waiting_fence
{
d3d12_fence_iface *fence;
VkSemaphore submission_timeline;
struct d3d12_fence *fence;
uint64_t value;
LONG **submission_counters;
size_t num_submission_counts;
bool signal;
};
struct vkd3d_fence_worker
@@ -516,6 +508,8 @@ static inline HRESULT vkd3d_set_private_data_interface(struct vkd3d_private_stor
HRESULT STDMETHODCALLTYPE d3d12_object_SetName(ID3D12Object *iface, const WCHAR *name);
/* ID3D12Fence */
typedef ID3D12Fence1 d3d12_fence_iface;
struct d3d12_fence_value
{
uint64_t virtual_value;
@@ -585,52 +579,6 @@ HRESULT d3d12_fence_create(struct d3d12_device *device,
HRESULT d3d12_fence_set_event_on_completion(struct d3d12_fence *fence,
UINT64 value, HANDLE event, enum vkd3d_waiting_event_type type);
struct d3d12_shared_fence
{
d3d12_fence_iface ID3D12Fence_iface;
LONG refcount_internal;
LONG refcount;
D3D12_FENCE_FLAGS d3d12_flags;
VkSemaphore timeline_semaphore;
struct d3d12_device *device;
struct vkd3d_private_store private_store;
};
static inline struct d3d12_shared_fence *shared_impl_from_ID3D12Fence1(ID3D12Fence1 *iface)
{
extern CONST_VTBL struct ID3D12Fence1Vtbl d3d12_shared_fence_vtbl;
if (!iface)
return NULL;
assert(iface->lpVtbl == &d3d12_shared_fence_vtbl);
return CONTAINING_RECORD(iface, struct d3d12_shared_fence, ID3D12Fence_iface);
}
static inline struct d3d12_shared_fence *shared_impl_from_ID3D12Fence(ID3D12Fence *iface)
{
return shared_impl_from_ID3D12Fence1((ID3D12Fence1 *)iface);
}
HRESULT d3d12_shared_fence_create(struct d3d12_device *device,
uint64_t initial_value, D3D12_FENCE_FLAGS flags, struct d3d12_shared_fence **fence);
static inline bool is_shared_ID3D12Fence1(ID3D12Fence1 *iface)
{
extern CONST_VTBL struct ID3D12Fence1Vtbl d3d12_shared_fence_vtbl;
extern CONST_VTBL struct ID3D12Fence1Vtbl d3d12_fence_vtbl;
assert(iface->lpVtbl == &d3d12_shared_fence_vtbl || iface->lpVtbl == &d3d12_fence_vtbl);
return iface->lpVtbl == &d3d12_shared_fence_vtbl;
}
static inline bool is_shared_ID3D12Fence(ID3D12Fence *iface)
{
return is_shared_ID3D12Fence1((ID3D12Fence1 *)iface);
}
enum vkd3d_allocation_flag
{
VKD3D_ALLOCATION_FLAG_GLOBAL_BUFFER = (1u << 0),
@@ -956,7 +904,7 @@ VkImageSubresource vk_image_subresource_from_d3d12(
HRESULT d3d12_resource_create_committed(struct d3d12_device *device, const D3D12_RESOURCE_DESC1 *desc,
const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags, D3D12_RESOURCE_STATES initial_state,
const D3D12_CLEAR_VALUE *optimized_clear_value, HANDLE shared_handle, struct d3d12_resource **resource);
const D3D12_CLEAR_VALUE *optimized_clear_value, struct d3d12_resource **resource);
HRESULT d3d12_resource_create_placed(struct d3d12_device *device, const D3D12_RESOURCE_DESC1 *desc,
struct d3d12_heap *heap, uint64_t heap_offset, D3D12_RESOURCE_STATES initial_state,
const D3D12_CLEAR_VALUE *optimized_clear_value, struct d3d12_resource **resource);
@@ -1089,6 +1037,9 @@ struct vkd3d_descriptor_binding
#define VKD3D_RESOURCE_DESC_INCREMENT_LOG2 5
#define VKD3D_RESOURCE_DESC_INCREMENT (1u << VKD3D_RESOURCE_DESC_INCREMENT_LOG2)
/* FIXME: Gross hack for now. */
#define VKD3D_RESOURCE_DESC_DEFER_COPY_MASK ((uintptr_t)INTPTR_MIN)
/* Arrange data so that it can pack as tightly as possible.
* When we copy descriptors, we must copy both structures.
* In copy_desc_range we scan through the entire metadata_binding, so
@@ -1219,6 +1170,7 @@ struct d3d12_descriptor_heap
{
ID3D12DescriptorHeap ID3D12DescriptorHeap_iface;
LONG refcount;
LONG internal_refcount;
uint64_t gpu_va;
D3D12_DESCRIPTOR_HEAP_DESC desc;
@@ -1252,6 +1204,8 @@ struct d3d12_descriptor_heap
HRESULT d3d12_descriptor_heap_create(struct d3d12_device *device,
const D3D12_DESCRIPTOR_HEAP_DESC *desc, struct d3d12_descriptor_heap **descriptor_heap);
void d3d12_descriptor_heap_dec_ref(struct d3d12_descriptor_heap *heap);
void d3d12_descriptor_heap_inc_ref(struct d3d12_descriptor_heap *heap);
void d3d12_descriptor_heap_cleanup(struct d3d12_descriptor_heap *descriptor_heap);
static inline struct d3d12_descriptor_heap *impl_from_ID3D12DescriptorHeap(ID3D12DescriptorHeap *iface)
@@ -1287,6 +1241,8 @@ static inline struct d3d12_desc_split d3d12_desc_decode_va(vkd3d_cpu_descriptor_
* Above that, we have the d3d12_descriptor_heap, which is allocated with enough alignment
* to contain these twiddle bits. */
va &= ~VKD3D_RESOURCE_DESC_DEFER_COPY_MASK;
num_bits_descriptors = va & (VKD3D_RESOURCE_DESC_INCREMENT - 1);
heap_offset = (va >> VKD3D_RESOURCE_DESC_INCREMENT_LOG2) & (((size_t)1 << num_bits_descriptors) - 1);
split.offset = (uint32_t)heap_offset;
@@ -1308,6 +1264,54 @@ static inline uint32_t d3d12_desc_heap_offset_from_gpu_handle(D3D12_GPU_DESCRIPT
return (uint32_t)handle.ptr / VKD3D_RESOURCE_DESC_INCREMENT;
}
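A simplified model of the tag bit handled above (the helper names are hypothetical; the real d3d12_desc_decode_va additionally recovers the heap pointer and offset from the low-order bits): the top bit of the CPU VA marks descriptors that live in a shader-visible CBV/SRV/UAV heap and must be routed through the deferred-copy ring.

#include <stdbool.h>
#include <stdint.h>

#define DEFER_COPY_MASK ((uintptr_t)INTPTR_MIN) /* the sign bit of the pointer */

/* Tag a descriptor VA as requiring a deferred copy. */
static inline uintptr_t va_tag_deferred(uintptr_t va)
{
    return va | DEFER_COPY_MASK;
}

/* Test for the tag without disturbing the rest of the VA. */
static inline bool va_is_deferred(uintptr_t va)
{
    return (va & DEFER_COPY_MASK) != 0;
}

/* Strip the tag before decoding the heap pointer and offset. */
static inline uintptr_t va_strip_tag(uintptr_t va)
{
    return va & ~DEFER_COPY_MASK;
}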
struct vkd3d_descriptor_update_ring_copy
{
void *src;
void *dst;
};
struct vkd3d_descriptor_update_deferred_release
{
struct d3d12_descriptor_heap *heap;
uint32_t write_count;
};
#define VKD3D_DESCRIPTOR_UPDATE_RING_SIZE (256 * 1024)
#define VKD3D_DESCRIPTOR_UPDATE_RING_MASK (VKD3D_DESCRIPTOR_UPDATE_RING_SIZE - 1)
struct vkd3d_descriptor_update_ring
{
struct vkd3d_descriptor_update_ring_copy *cbv_srv_uav_copies;
uint32_t write_count;
uint32_t read_count;
vkd3d_cpu_descriptor_va_t staging_base;
struct d3d12_descriptor_heap *staging;
uint32_t staging_write_count;
struct vkd3d_descriptor_update_deferred_release *deferred_releases;
size_t deferred_release_count;
size_t deferred_release_size;
pthread_mutex_t submission_lock;
};
/* When writing shader visible descriptors, defer the write and perform the work
* at the last minute in the submission queues. */
HRESULT vkd3d_descriptor_update_ring_init(struct vkd3d_descriptor_update_ring *ring,
struct d3d12_device *device);
void vkd3d_descriptor_update_ring_cleanup(struct vkd3d_descriptor_update_ring *ring,
struct d3d12_device *device);
void vkd3d_descriptor_update_ring_flush(struct vkd3d_descriptor_update_ring *ring,
struct d3d12_device *device);
vkd3d_cpu_descriptor_va_t vkd3d_descriptor_update_ring_allocate_scratch(struct vkd3d_descriptor_update_ring *ring);
void vkd3d_descriptor_update_ring_push_copy(struct vkd3d_descriptor_update_ring *ring,
struct d3d12_device *device,
vkd3d_cpu_descriptor_va_t src, vkd3d_cpu_descriptor_va_t dst);
void vkd3d_descriptor_update_ring_mark_heap_destruction(struct vkd3d_descriptor_update_ring *ring,
struct d3d12_descriptor_heap *heap);
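/* Minimal usage sketch under assumed call sites; only the ring entry points
 * above are real, the surrounding queue plumbing is hypothetical:
 *
 * Render thread, on a shader-visible descriptor write:
 *     vkd3d_cpu_descriptor_va_t scratch =
 *             vkd3d_descriptor_update_ring_allocate_scratch(&device->descriptor_update_ring);
 *     ... write the descriptor into scratch instead of the live heap ...
 *     vkd3d_descriptor_update_ring_push_copy(&device->descriptor_update_ring, device, scratch, dst);
 *
 * Submission thread, right before vkQueueSubmit():
 *     vkd3d_descriptor_update_ring_flush(&device->descriptor_update_ring, device);
 */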
/* ID3D12QueryHeap */
struct d3d12_query_heap
{
@ -1455,10 +1459,6 @@ struct d3d12_root_signature
HRESULT d3d12_root_signature_create(struct d3d12_device *device, const void *bytecode,
size_t bytecode_length, struct d3d12_root_signature **root_signature);
HRESULT d3d12_root_signature_create_raw(struct d3d12_device *device, const void *payload,
size_t payload_size, struct d3d12_root_signature **root_signature);
HRESULT d3d12_root_signature_create_empty(struct d3d12_device *device,
struct d3d12_root_signature **root_signature);
/* Private ref counts, for pipeline library. */
void d3d12_root_signature_inc_ref(struct d3d12_root_signature *state);
void d3d12_root_signature_dec_ref(struct d3d12_root_signature *state);
@ -1481,14 +1481,9 @@ HRESULT vkd3d_create_pipeline_layout(struct d3d12_device *device,
VkPipelineLayout *pipeline_layout);
int vkd3d_parse_root_signature_v_1_0(const struct vkd3d_shader_code *dxbc,
struct vkd3d_versioned_root_signature_desc *desc,
vkd3d_shader_hash_t *compatibility_hash);
struct vkd3d_versioned_root_signature_desc *desc);
int vkd3d_parse_root_signature_v_1_1(const struct vkd3d_shader_code *dxbc,
struct vkd3d_versioned_root_signature_desc *desc,
vkd3d_shader_hash_t *compatibility_hash);
int vkd3d_parse_root_signature_v_1_1_from_raw_payload(const struct vkd3d_shader_code *dxbc,
struct vkd3d_versioned_root_signature_desc *desc,
vkd3d_shader_hash_t *compatibility_hash);
struct vkd3d_versioned_root_signature_desc *desc);
VkShaderStageFlags vkd3d_vk_stage_flags_from_visibility(D3D12_SHADER_VISIBILITY visibility);
enum vkd3d_shader_visibility vkd3d_shader_visibility_from_d3d12(D3D12_SHADER_VISIBILITY visibility);
@ -1518,11 +1513,10 @@ struct vkd3d_shader_debug_ring_spec_constants
uint32_t ring_words;
};
#define VKD3D_SHADER_DEBUG_RING_SPEC_INFO_MAP_ENTRIES 4
struct vkd3d_shader_debug_ring_spec_info
{
struct vkd3d_shader_debug_ring_spec_constants constants;
VkSpecializationMapEntry map_entries[VKD3D_SHADER_DEBUG_RING_SPEC_INFO_MAP_ENTRIES];
VkSpecializationMapEntry map_entries[4];
VkSpecializationInfo spec_info;
};
@ -2339,8 +2333,6 @@ struct vkd3d_queue
VkCommandPool barrier_pool;
VkCommandBuffer barrier_command_buffer;
VkSemaphore serializing_binary_semaphore;
VkSemaphore submission_timeline;
uint64_t submission_timeline_count;
uint32_t vk_family_index;
VkQueueFlags vk_queue_flags;
@ -2353,8 +2345,6 @@ struct vkd3d_queue
size_t wait_values_size;
VkPipelineStageFlags *wait_stages;
size_t wait_stages_size;
d3d12_fence_iface **wait_fences;
size_t wait_fences_size;
uint32_t wait_count;
};
@ -2363,7 +2353,7 @@ HRESULT vkd3d_queue_create(struct d3d12_device *device, uint32_t family_index, u
const VkQueueFamilyProperties *properties, struct vkd3d_queue **queue);
void vkd3d_queue_destroy(struct vkd3d_queue *queue, struct d3d12_device *device);
void vkd3d_queue_release(struct vkd3d_queue *queue);
void vkd3d_queue_add_wait(struct vkd3d_queue *queue, d3d12_fence_iface *waiter, VkSemaphore semaphore, uint64_t value);
void vkd3d_queue_add_wait(struct vkd3d_queue *queue, VkSemaphore semaphore, uint64_t value);
enum vkd3d_submission_type
{
@ -2399,13 +2389,13 @@ struct vkd3d_sparse_memory_bind_range
struct d3d12_command_queue_submission_wait
{
d3d12_fence_iface *fence;
struct d3d12_fence *fence;
UINT64 value;
};
struct d3d12_command_queue_submission_signal
{
d3d12_fence_iface *fence;
struct d3d12_fence *fence;
UINT64 value;
};
@ -2498,29 +2488,6 @@ struct vkd3d_execute_indirect_info
VkPipeline vk_pipeline;
};
enum vkd3d_patch_command_token
{
VKD3D_PATCH_COMMAND_TOKEN_COPY_CONST_U32 = 0,
VKD3D_PATCH_COMMAND_TOKEN_COPY_IBO_VA_LO = 1,
VKD3D_PATCH_COMMAND_TOKEN_COPY_IBO_VA_HI = 2,
VKD3D_PATCH_COMMAND_TOKEN_COPY_IBO_SIZE = 3,
VKD3D_PATCH_COMMAND_TOKEN_COPY_INDEX_FORMAT = 4,
VKD3D_PATCH_COMMAND_TOKEN_COPY_VBO_VA_LO = 5,
VKD3D_PATCH_COMMAND_TOKEN_COPY_VBO_VA_HI = 6,
VKD3D_PATCH_COMMAND_TOKEN_COPY_VBO_SIZE = 7,
VKD3D_PATCH_COMMAND_TOKEN_COPY_VBO_STRIDE = 8,
VKD3D_PATCH_COMMAND_TOKEN_COPY_ROOT_VA_LO = 9,
VKD3D_PATCH_COMMAND_TOKEN_COPY_ROOT_VA_HI = 10,
VKD3D_PATCH_COMMAND_TOKEN_COPY_VERTEX_COUNT = 11,
VKD3D_PATCH_COMMAND_TOKEN_COPY_INDEX_COUNT = 12,
VKD3D_PATCH_COMMAND_TOKEN_COPY_INSTANCE_COUNT = 13,
VKD3D_PATCH_COMMAND_TOKEN_COPY_FIRST_INDEX = 14,
VKD3D_PATCH_COMMAND_TOKEN_COPY_FIRST_VERTEX = 15,
VKD3D_PATCH_COMMAND_TOKEN_COPY_FIRST_INSTANCE = 16,
VKD3D_PATCH_COMMAND_TOKEN_COPY_VERTEX_OFFSET = 17,
VKD3D_PATCH_COMMAND_INT_MAX = 0x7fffffff
};
/* ID3D12CommandSignature */
struct d3d12_command_signature
{
@ -2625,7 +2592,6 @@ enum vkd3d_breadcrumb_command_type
VKD3D_BREADCRUMB_COMMAND_DRAW_INDEXED,
VKD3D_BREADCRUMB_COMMAND_DISPATCH,
VKD3D_BREADCRUMB_COMMAND_EXECUTE_INDIRECT,
VKD3D_BREADCRUMB_COMMAND_EXECUTE_INDIRECT_TEMPLATE,
VKD3D_BREADCRUMB_COMMAND_COPY,
VKD3D_BREADCRUMB_COMMAND_RESOLVE,
VKD3D_BREADCRUMB_COMMAND_WBI,
@ -2642,7 +2608,6 @@ enum vkd3d_breadcrumb_command_type
VKD3D_BREADCRUMB_COMMAND_IBO,
VKD3D_BREADCRUMB_COMMAND_ROOT_DESC,
VKD3D_BREADCRUMB_COMMAND_ROOT_CONST,
VKD3D_BREADCRUMB_COMMAND_TAG,
};
#ifdef VKD3D_ENABLE_BREADCRUMBS
@ -2666,8 +2631,6 @@ struct vkd3d_breadcrumb_command
uint32_t word_32bit;
uint64_t word_64bit;
uint32_t count;
/* Pointer must remain alive. */
const char *tag;
};
};
@ -3134,10 +3097,6 @@ struct vkd3d_execute_indirect_args
VkDeviceAddress dst_indirect_count_va;
uint32_t api_buffer_word_stride;
uint32_t device_generated_commands_word_stride;
/* Arbitrary tag used for debug version of state patcher. Debug messages from tag 0 are ignored. */
uint32_t debug_tag;
uint32_t implicit_instance;
};
struct vkd3d_execute_indirect_pipeline
@ -3422,6 +3381,7 @@ struct d3d12_device
struct vkd3d_sampler_state sampler_state;
struct vkd3d_shader_debug_ring debug_ring;
struct vkd3d_pipeline_library_disk_cache disk_cache;
struct vkd3d_descriptor_update_ring descriptor_update_ring;
#ifdef VKD3D_ENABLE_BREADCRUMBS
struct vkd3d_breadcrumb_tracer breadcrumb_tracer;
#endif
@ -3598,14 +3558,6 @@ struct d3d12_state_object_identifier
/* The index into vkGetShaderStackSize and friends for pGroups[]. */
uint32_t group_index;
/* For AddToStateObject(). We need to return the identifier pointer
* for the parent, not the child. This makes it easy to validate that
* we observe the same SBT handles as specified by DXR 1.1. */
/* If -1, ignore; otherwise, redirect. */
int inherited_collection_index;
uint32_t inherited_collection_export_index;
};
struct d3d12_state_object_stack_info
@ -3618,15 +3570,6 @@ struct d3d12_state_object_stack_info
uint32_t max_closest;
};
#ifdef VKD3D_ENABLE_BREADCRUMBS
struct d3d12_state_object_breadcrumb_shader
{
vkd3d_shader_hash_t hash;
VkShaderStageFlagBits stage;
char name[64];
};
#endif
struct d3d12_state_object
{
d3d12_state_object_iface ID3D12StateObject_iface;
@ -3648,14 +3591,7 @@ struct d3d12_state_object
/* Normally stages_count == entry_points_count, but entry_points holds the entry points we
 * export externally, and stages_count matches the pStages[] size for purposes of index fixups. */
/* Can be bound. */
VkPipeline pipeline;
/* Can be used as a library. */
VkPipeline pipeline_library;
/* Can be inherited by AddToStateObject(). */
D3D12_RAYTRACING_PIPELINE_CONFIG1 pipeline_config;
D3D12_RAYTRACING_SHADER_CONFIG shader_config;
struct
{
@ -3664,8 +3600,6 @@ struct d3d12_state_object
VkDescriptorSet desc_set;
VkDescriptorPool desc_pool;
uint32_t set_index;
uint64_t compatibility_hash;
bool owned_handles;
} local_static_sampler;
UINT64 pipeline_stack_size;
@ -3674,23 +3608,10 @@ struct d3d12_state_object
struct d3d12_state_object **collections;
size_t collections_count;
struct d3d12_root_signature *global_root_signature;
#ifdef VKD3D_ENABLE_BREADCRUMBS
/* For breadcrumbs. */
struct d3d12_state_object_breadcrumb_shader *breadcrumb_shaders;
size_t breadcrumb_shaders_size;
size_t breadcrumb_shaders_count;
#endif
struct vkd3d_private_store private_store;
};
HRESULT d3d12_state_object_create(struct d3d12_device *device, const D3D12_STATE_OBJECT_DESC *desc,
struct d3d12_state_object *parent,
struct d3d12_state_object **object);
HRESULT d3d12_state_object_add(struct d3d12_device *device, const D3D12_STATE_OBJECT_DESC *desc,
struct d3d12_state_object *parent,
struct d3d12_state_object **object);
static inline struct d3d12_state_object *impl_from_ID3D12StateObject(ID3D12StateObject *iface)
@ -3962,74 +3883,6 @@ void vkd3d_acceleration_structure_copy(
D3D12_GPU_VIRTUAL_ADDRESS dst, D3D12_GPU_VIRTUAL_ADDRESS src,
D3D12_RAYTRACING_ACCELERATION_STRUCTURE_COPY_MODE mode);
typedef enum D3D11_USAGE
{
D3D11_USAGE_DEFAULT,
D3D11_USAGE_IMMUTABLE,
D3D11_USAGE_DYNAMIC,
D3D11_USAGE_STAGING,
} D3D11_USAGE;
typedef enum D3D11_BIND_FLAG
{
D3D11_BIND_VERTEX_BUFFER = 0x0001,
D3D11_BIND_INDEX_BUFFER = 0x0002,
D3D11_BIND_CONSTANT_BUFFER = 0x0004,
D3D11_BIND_SHADER_RESOURCE = 0x0008,
D3D11_BIND_STREAM_OUTPUT = 0x0010,
D3D11_BIND_RENDER_TARGET = 0x0020,
D3D11_BIND_DEPTH_STENCIL = 0x0040,
D3D11_BIND_UNORDERED_ACCESS = 0x0080,
D3D11_BIND_DECODER = 0x0200,
D3D11_BIND_VIDEO_ENCODER = 0x0400
} D3D11_BIND_FLAG;
typedef enum D3D11_TEXTURE_LAYOUT
{
D3D11_TEXTURE_LAYOUT_UNDEFINED = 0x0,
D3D11_TEXTURE_LAYOUT_ROW_MAJOR = 0x1,
D3D11_TEXTURE_LAYOUT_64K_STANDARD_SWIZZLE = 0x2,
} D3D11_TEXTURE_LAYOUT;
typedef enum D3D11_RESOURCE_MISC_FLAG
{
D3D11_RESOURCE_MISC_GENERATE_MIPS = 0x1,
D3D11_RESOURCE_MISC_SHARED = 0x2,
D3D11_RESOURCE_MISC_TEXTURECUBE = 0x4,
D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS = 0x10,
D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS = 0x20,
D3D11_RESOURCE_MISC_BUFFER_STRUCTURED = 0x40,
D3D11_RESOURCE_MISC_RESOURCE_CLAMP = 0x80,
D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX = 0x100,
D3D11_RESOURCE_MISC_GDI_COMPATIBLE = 0x200,
D3D11_RESOURCE_MISC_SHARED_NTHANDLE = 0x800,
D3D11_RESOURCE_MISC_RESTRICTED_CONTENT = 0x1000,
D3D11_RESOURCE_MISC_RESTRICT_SHARED_RESOURCE = 0x2000,
D3D11_RESOURCE_MISC_RESTRICT_SHARED_RESOURCE_DRIVER = 0x4000,
D3D11_RESOURCE_MISC_GUARDED = 0x8000,
D3D11_RESOURCE_MISC_TILE_POOL = 0x20000,
D3D11_RESOURCE_MISC_TILED = 0x40000,
D3D11_RESOURCE_MISC_HW_PROTECTED = 0x80000,
} D3D11_RESOURCE_MISC_FLAG;
struct DxvkSharedTextureMetadata
{
UINT Width;
UINT Height;
UINT MipLevels;
UINT ArraySize;
DXGI_FORMAT Format;
DXGI_SAMPLE_DESC SampleDesc;
D3D11_USAGE Usage;
UINT BindFlags;
UINT CPUAccessFlags;
UINT MiscFlags;
D3D11_TEXTURE_LAYOUT TextureLayout;
};
bool vkd3d_set_shared_metadata(HANDLE handle, void *buf, uint32_t buf_size);
bool vkd3d_get_shared_metadata(HANDLE handle, void *buf, uint32_t buf_size, uint32_t *metadata_size);
HANDLE vkd3d_open_kmt_handle(HANDLE kmt_handle);
#define VKD3D_VENDOR_ID_NVIDIA 0x10DE
#define VKD3D_VENDOR_ID_AMD 0x1002
#define VKD3D_VENDOR_ID_INTEL 0x8086
View File
@ -46,7 +46,6 @@ enum vkd3d_meta_copy_mode
#include <cs_resolve_predicate.h>
#include <cs_resolve_query.h>
#include <cs_execute_indirect_patch.h>
#include <cs_execute_indirect_patch_debug_ring.h>
#include <vs_fullscreen_layer.h>
#include <vs_fullscreen.h>
#include <gs_fullscreen.h>
View File
@ -49,7 +49,6 @@ VK_INSTANCE_PFN(vkGetPhysicalDeviceQueueFamilyProperties)
VK_INSTANCE_PFN(vkGetPhysicalDeviceSparseImageFormatProperties)
VK_INSTANCE_PFN(vkGetPhysicalDeviceFeatures2)
VK_INSTANCE_PFN(vkGetPhysicalDeviceProperties2)
VK_INSTANCE_PFN(vkGetPhysicalDeviceExternalSemaphoreProperties)
/* VK_EXT_debug_utils */
VK_INSTANCE_EXT_PFN(vkCreateDebugUtilsMessengerEXT)
@ -224,18 +223,6 @@ VK_DEVICE_EXT_PFN(vkGetDeviceBufferMemoryRequirementsKHR)
VK_DEVICE_EXT_PFN(vkGetDeviceImageMemoryRequirementsKHR)
VK_DEVICE_EXT_PFN(vkGetDeviceImageSparseMemoryRequirementsKHR)
#ifdef VK_KHR_external_memory_win32
/* VK_KHR_external_memory_win32 */
VK_DEVICE_EXT_PFN(vkGetMemoryWin32HandleKHR)
VK_DEVICE_EXT_PFN(vkGetMemoryWin32HandlePropertiesKHR)
#endif
#ifdef VK_KHR_external_semaphore_win32
/* VK_KHR_external_semaphore_win32 */
VK_DEVICE_EXT_PFN(vkGetSemaphoreWin32HandleKHR)
VK_DEVICE_EXT_PFN(vkImportSemaphoreWin32HandleKHR)
#endif
/* VK_EXT_calibrated_timestamps */
VK_DEVICE_EXT_PFN(vkGetCalibratedTimestampsEXT)
VK_INSTANCE_EXT_PFN(vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)
View File
@ -83,7 +83,7 @@ idl_generator = generator(idl_compiler,
arguments : [ '-h', '-o', '@OUTPUT@', '@INPUT@' ])
glsl_compiler = find_program('glslangValidator')
glsl_args = [ '-V', '--target-env', 'vulkan1.1', '--vn', '@BASENAME@', '@INPUT@', '-o', '@OUTPUT@' ]
glsl_args = [ '-V', '--vn', '@BASENAME@', '@INPUT@', '-o', '@OUTPUT@' ]
if run_command(glsl_compiler, [ '--quiet', '--version' ], check : false).returncode() == 0
glsl_args += [ '--quiet' ]
endif
@ -1 +1 @@
Subproject commit 9f2fd6356c14376ab5b88518d6dd4e6787084525
Subproject commit b1afbf5fa8e6f10f6c226cea222a8a5d5518870f
View File
@ -1449,721 +1449,6 @@ void test_vbv_stride_edge_cases(void)
destroy_test_context(&context);
}
void test_execute_indirect_state(void)
{
static const struct vec4 values = { 1000.0f, 2000.0f, 3000.0f, 4000.0f };
D3D12_INDIRECT_ARGUMENT_DESC indirect_argument_descs[2];
D3D12_COMMAND_SIGNATURE_DESC command_signature_desc;
D3D12_ROOT_SIGNATURE_DESC root_signature_desc;
D3D12_GRAPHICS_PIPELINE_STATE_DESC pso_desc;
ID3D12CommandSignature *command_signature;
D3D12_SO_DECLARATION_ENTRY so_entries[1];
ID3D12GraphicsCommandList *command_list;
D3D12_ROOT_PARAMETER root_parameters[4];
ID3D12RootSignature *root_signatures[2];
ID3D12Resource *argument_buffer_late;
D3D12_STREAM_OUTPUT_BUFFER_VIEW sov;
ID3D12Resource *streamout_buffer;
D3D12_VERTEX_BUFFER_VIEW vbvs[2];
ID3D12Resource *argument_buffer;
struct test_context_desc desc;
ID3D12PipelineState *psos[2];
struct test_context context;
struct resource_readback rb;
D3D12_INDEX_BUFFER_VIEW ibv;
ID3D12CommandQueue *queue;
const UINT so_stride = 16;
ID3D12Resource *vbo[3];
ID3D12Resource *ibo[2];
unsigned int i, j, k;
ID3D12Resource *cbv;
ID3D12Resource *srv;
ID3D12Resource *uav;
HRESULT hr;
static const D3D12_INPUT_ELEMENT_DESC layout_desc[] =
{
{"COLOR", 0, DXGI_FORMAT_R32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0},
{"COLOR", 1, DXGI_FORMAT_R32_FLOAT, 1, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0},
};
struct test
{
const D3D12_INDIRECT_ARGUMENT_DESC *indirect_arguments;
uint32_t indirect_argument_count;
const void *argument_buffer_data;
size_t argument_buffer_size;
uint32_t api_max_count;
const struct vec4 *expected_output;
uint32_t expected_output_count;
uint32_t stride;
uint32_t pso_index;
bool needs_root_sig;
};
/* Modify root parameters. */
struct root_constant_data
{
float constants[2];
D3D12_DRAW_INDEXED_ARGUMENTS indexed;
};
static const D3D12_INDIRECT_ARGUMENT_DESC root_constant_sig[2] =
{
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT, .Constant = {
.RootParameterIndex = 0, .DestOffsetIn32BitValues = 1, .Num32BitValuesToSet = 2 }},
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED }
};
static const struct root_constant_data root_constant_data[] =
{
{
.constants = { 100.0f, 500.0f },
.indexed = { .IndexCountPerInstance = 2, .InstanceCount = 1 }
},
{
.constants = { 200.0f, 800.0f },
.indexed = { .IndexCountPerInstance = 1, .InstanceCount = 2,
.StartIndexLocation = 1, .StartInstanceLocation = 100, }
},
};
static const struct vec4 root_constant_expected[] =
{
{ 1000.0f, 64.0f + 100.0f, 500.0f, 4000.0f },
{ 1001.0f, 65.0f + 100.0f, 500.0f, 4000.0f },
{ 1001.0f, 65.0f + 200.0f, 800.0f, 4000.0f },
{ 1001.0f, 65.0f + 200.0f, 800.0f, 4001.0f },
};
/* Modify root parameters, but with a very large root signature to test boundary conditions. */
static const D3D12_INDIRECT_ARGUMENT_DESC root_constant_spill_sig[2] =
{
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT, .Constant = {
.RootParameterIndex = 0, .DestOffsetIn32BitValues = 44 + 1, .Num32BitValuesToSet = 2 }},
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED }
};
static const struct root_constant_data root_constant_spill_data[] =
{
{
.constants = { 100.0f, 500.0f },
.indexed = { .IndexCountPerInstance = 2, .InstanceCount = 1 }
},
{
.constants = { 200.0f, 800.0f },
.indexed = { .IndexCountPerInstance = 1, .InstanceCount = 2,
.StartIndexLocation = 1, .StartInstanceLocation = 100, }
},
};
static const struct vec4 root_constant_spill_expected[] =
{
{ 1000.0f, 64.0f + 100.0f, 500.0f, 4000.0f },
{ 1001.0f, 65.0f + 100.0f, 500.0f, 4000.0f },
{ 1001.0f, 65.0f + 200.0f, 800.0f, 4000.0f },
{ 1001.0f, 65.0f + 200.0f, 800.0f, 4001.0f },
};
/* Modify VBOs. */
struct indirect_vbo_data
{
D3D12_VERTEX_BUFFER_VIEW view[2];
D3D12_DRAW_INDEXED_ARGUMENTS indexed;
};
static const D3D12_INDIRECT_ARGUMENT_DESC indirect_vbo_sig[3] =
{
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_VERTEX_BUFFER_VIEW, .VertexBuffer = { .Slot = 0 }},
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_VERTEX_BUFFER_VIEW, .VertexBuffer = { .Slot = 1 }},
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED },
};
/* Fill buffer locations later. */
struct indirect_vbo_data indirect_vbo_data[] =
{
{
.view = { { 0, 64, 8 }, { 0, 64, 16 } },
.indexed = { .IndexCountPerInstance = 2, .InstanceCount = 2 }
},
{
/* Test indirectly binding NULL descriptor and 0 stride. */
.view = { { 0, 0, 0 }, { 0, 64, 0 } },
.indexed = { .IndexCountPerInstance = 2, .InstanceCount = 1 }
}
};
static const struct vec4 indirect_vbo_expected[] =
{
{ 1064.0f, 2128.0f, 3000.0f, 4000.0f },
{ 1066.0f, 2132.0f, 3000.0f, 4000.0f },
{ 1064.0f, 2128.0f, 3000.0f, 4001.0f },
{ 1066.0f, 2132.0f, 3000.0f, 4001.0f },
{ 1000.0f, 2016.0f, 3000.0f, 4000.0f }, /* This is buggy on WARP and AMD. We seem to get null descriptor instead. */
{ 1000.0f, 2016.0f, 3000.0f, 4000.0f }, /* This is buggy on WARP and AMD. */
};
/* Modify just one VBO. */
struct indirect_vbo_one_data
{
D3D12_VERTEX_BUFFER_VIEW view;
D3D12_DRAW_INDEXED_ARGUMENTS indexed;
};
static const D3D12_INDIRECT_ARGUMENT_DESC indirect_vbo_one_sig[2] =
{
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_VERTEX_BUFFER_VIEW, .VertexBuffer = { .Slot = 0 }},
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED },
};
/* Fill buffer locations later. */
struct indirect_vbo_one_data indirect_vbo_one_data[] =
{
{
.view = { 0, 64, 8 },
.indexed = { .IndexCountPerInstance = 2, .InstanceCount = 1 }
},
{
.indexed = { .IndexCountPerInstance = 1, .InstanceCount = 1 }
}
};
static const struct vec4 indirect_vbo_one_expected[] =
{
{ 1128.0f, 2064.0f, 3000.0f, 4000.0f },
{ 1130.0f, 2065.0f, 3000.0f, 4000.0f },
{ 1000.0f, 2064.0f, 3000.0f, 4000.0f },
};
/* Indirect IBO */
struct indirect_ibo_data
{
D3D12_INDEX_BUFFER_VIEW view;
D3D12_DRAW_INDEXED_ARGUMENTS indexed;
};
static const D3D12_INDIRECT_ARGUMENT_DESC indirect_ibo_sig[2] =
{
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_INDEX_BUFFER_VIEW },
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED },
};
struct indirect_ibo_data indirect_ibo_data[] =
{
{
.view = { 0, 0, DXGI_FORMAT_R32_UINT },
.indexed = { .IndexCountPerInstance = 2, .InstanceCount = 1 }
},
{
.view = { 0, 64, DXGI_FORMAT_R16_UINT },
.indexed = { .IndexCountPerInstance = 4, .InstanceCount = 1 }
},
};
static const struct vec4 indirect_ibo_expected[] =
{
{ 1000.0f, 2064.0f, 3000.0f, 4000.0f },
{ 1000.0f, 2064.0f, 3000.0f, 4000.0f },
{ 1016.0f, 2080.0f, 3000.0f, 4000.0f },
{ 1000.0f, 2064.0f, 3000.0f, 4000.0f },
{ 1017.0f, 2081.0f, 3000.0f, 4000.0f },
{ 1000.0f, 2064.0f, 3000.0f, 4000.0f },
};
/* Indirect root arguments */
struct indirect_root_descriptor_data
{
D3D12_GPU_VIRTUAL_ADDRESS cbv;
D3D12_GPU_VIRTUAL_ADDRESS srv;
D3D12_GPU_VIRTUAL_ADDRESS uav;
D3D12_DRAW_ARGUMENTS array;
};
static const D3D12_INDIRECT_ARGUMENT_DESC indirect_root_descriptor_sig[4] =
{
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT_BUFFER_VIEW, .ConstantBufferView = { .RootParameterIndex = 1 } },
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_SHADER_RESOURCE_VIEW, .ShaderResourceView = { .RootParameterIndex = 2 } },
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_UNORDERED_ACCESS_VIEW, .UnorderedAccessView = { .RootParameterIndex = 3 } },
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW },
};
struct indirect_root_descriptor_data indirect_root_descriptor_data[] =
{
{ .array = { .VertexCountPerInstance = 1, .InstanceCount = 1 } },
{ .array = { .VertexCountPerInstance = 1, .InstanceCount = 1 } },
};
static const struct vec4 indirect_root_descriptor_expected[] =
{
{ 1000.0f, 2064.0f, 3000.0f + 64.0f, 4000.0f + 2.0f },
{ 1000.0f, 2064.0f, 3000.0f + 128.0f, 4000.0f + 3.0f },
};
/* Test packing rules.
* 64-bit aligned values are tightly packed with 32-bit alignment when they are in indirect command buffers. */
struct indirect_alignment_data
{
float value;
uint32_t cbv_va[2];
D3D12_DRAW_ARGUMENTS arrays;
};
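/* Resulting layout: value at offset 0, cbv_va at offset 4, arrays at offset 12,
 * so the 64-bit GPU VA lands on a 4-byte boundary. It is therefore declared as
 * two uint32_t words and filled in with memcpy further down. */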
static const D3D12_INDIRECT_ARGUMENT_DESC indirect_alignment_sig[3] =
{
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT, .Constant = {
.RootParameterIndex = 0, .DestOffsetIn32BitValues = 1, .Num32BitValuesToSet = 1 }},
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT_BUFFER_VIEW, .ConstantBufferView = { .RootParameterIndex = 1 }},
{ .Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW },
};
struct indirect_alignment_data indirect_alignment_data[] =
{
{
.value = 5.0f,
.arrays = { .VertexCountPerInstance = 1, .InstanceCount = 1 }
},
{
.value = 6.0f,
.arrays = { .VertexCountPerInstance = 1, .InstanceCount = 1 }
},
};
static const struct vec4 indirect_alignment_expected[] =
{
{ 1000.0f, 69.0f, 3064.0f, 4000.0f },
{ 1000.0f, 70.0f, 3128.0f, 4000.0f },
};
#define DECL_TEST(t, pso_index, needs_root_sig) { t##_sig, ARRAY_SIZE(t##_sig), t##_data, sizeof(t##_data), ARRAY_SIZE(t##_data), \
t##_expected, ARRAY_SIZE(t##_expected), sizeof(*(t##_data)), pso_index, needs_root_sig }
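/* For example, DECL_TEST(root_constant, 0, true) expands to:
 * { root_constant_sig, ARRAY_SIZE(root_constant_sig),
 *   root_constant_data, sizeof(root_constant_data), ARRAY_SIZE(root_constant_data),
 *   root_constant_expected, ARRAY_SIZE(root_constant_expected),
 *   sizeof(*(root_constant_data)), 0, true }. */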
const struct test tests[] =
{
DECL_TEST(root_constant, 0, true),
DECL_TEST(indirect_vbo, 0, false),
DECL_TEST(indirect_vbo_one, 0, false),
DECL_TEST(indirect_ibo, 0, false),
DECL_TEST(indirect_root_descriptor, 0, true),
DECL_TEST(indirect_alignment, 0, true),
DECL_TEST(root_constant_spill, 1, true),
DECL_TEST(indirect_root_descriptor, 1, true),
};
#undef DECL_TEST
uint32_t ibo_data[ARRAY_SIZE(ibo)][64];
float vbo_data[ARRAY_SIZE(vbo)][64];
float generic_data[4096];
static const DWORD vs_code_small_cbv[] =
{
#if 0
cbuffer RootCBV : register(b0)
{
float a;
};
StructuredBuffer<float> RootSRV : register(t0);
cbuffer RootConstants : register(b0, space1)
{
float4 root;
};
float4 main(float c0 : COLOR0, float c1 : COLOR1, uint iid : SV_InstanceID) : SV_Position
{
return float4(c0, c1, a, RootSRV[0] + float(iid)) + root;
}
#endif
0x43425844, 0x33b7b302, 0x34259b9b, 0x3e8568d9, 0x5a5e0c3e, 0x00000001, 0x00000268, 0x00000003,
0x0000002c, 0x00000098, 0x000000cc, 0x4e475349, 0x00000064, 0x00000003, 0x00000008, 0x00000050,
0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000101, 0x00000050, 0x00000001, 0x00000000,
0x00000003, 0x00000001, 0x00000101, 0x00000056, 0x00000000, 0x00000008, 0x00000001, 0x00000002,
0x00000101, 0x4f4c4f43, 0x56530052, 0x736e495f, 0x636e6174, 0x00444965, 0x4e47534f, 0x0000002c,
0x00000001, 0x00000008, 0x00000020, 0x00000000, 0x00000001, 0x00000003, 0x00000000, 0x0000000f,
0x505f5653, 0x7469736f, 0x006e6f69, 0x58454853, 0x00000194, 0x00010051, 0x00000065, 0x0100086a,
0x07000059, 0x00308e46, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000000, 0x07000059,
0x00308e46, 0x00000001, 0x00000000, 0x00000000, 0x00000001, 0x00000001, 0x070000a2, 0x00307e46,
0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000000, 0x0300005f, 0x00101012, 0x00000000,
0x0300005f, 0x00101012, 0x00000001, 0x04000060, 0x00101012, 0x00000002, 0x00000008, 0x04000067,
0x001020f2, 0x00000000, 0x00000001, 0x02000068, 0x00000001, 0x0a0000a7, 0x00100012, 0x00000000,
0x00004001, 0x00000000, 0x00004001, 0x00000000, 0x00207006, 0x00000000, 0x00000000, 0x05000056,
0x00100022, 0x00000000, 0x0010100a, 0x00000002, 0x07000000, 0x00100012, 0x00000000, 0x0010001a,
0x00000000, 0x0010000a, 0x00000000, 0x09000000, 0x00102012, 0x00000000, 0x0010100a, 0x00000000,
0x0030800a, 0x00000001, 0x00000000, 0x00000000, 0x09000000, 0x00102022, 0x00000000, 0x0010100a,
0x00000001, 0x0030801a, 0x00000001, 0x00000000, 0x00000000, 0x0b000000, 0x00102042, 0x00000000,
0x0030800a, 0x00000000, 0x00000000, 0x00000000, 0x0030802a, 0x00000001, 0x00000000, 0x00000000,
0x09000000, 0x00102082, 0x00000000, 0x0010000a, 0x00000000, 0x0030803a, 0x00000001, 0x00000000,
0x00000000, 0x0100003e,
};
static const DWORD vs_code_large_cbv[] =
{
#if 0
cbuffer RootCBV : register(b0)
{
float a;
};
StructuredBuffer<float> RootSRV : register(t0);
cbuffer RootConstants : register(b0, space1)
{
// Cannot use arrays for root constants in D3D12.
float4 pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7, pad8, pad9, pad10;
float4 root;
};
float4 main(float c0 : COLOR0, float c1 : COLOR1, uint iid : SV_InstanceID) : SV_Position
{
return float4(c0, c1, a, RootSRV[0] + float(iid)) + root;
}
#endif
0x43425844, 0x99a057e8, 0x20344569, 0x434f8a7a, 0xf9171e08, 0x00000001, 0x00000268, 0x00000003,
0x0000002c, 0x00000098, 0x000000cc, 0x4e475349, 0x00000064, 0x00000003, 0x00000008, 0x00000050,
0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000101, 0x00000050, 0x00000001, 0x00000000,
0x00000003, 0x00000001, 0x00000101, 0x00000056, 0x00000000, 0x00000008, 0x00000001, 0x00000002,
0x00000101, 0x4f4c4f43, 0x56530052, 0x736e495f, 0x636e6174, 0x00444965, 0x4e47534f, 0x0000002c,
0x00000001, 0x00000008, 0x00000020, 0x00000000, 0x00000001, 0x00000003, 0x00000000, 0x0000000f,
0x505f5653, 0x7469736f, 0x006e6f69, 0x58454853, 0x00000194, 0x00010051, 0x00000065, 0x0100086a,
0x07000059, 0x00308e46, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000000, 0x07000059,
0x00308e46, 0x00000001, 0x00000000, 0x00000000, 0x0000000c, 0x00000001, 0x070000a2, 0x00307e46,
0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000000, 0x0300005f, 0x00101012, 0x00000000,
0x0300005f, 0x00101012, 0x00000001, 0x04000060, 0x00101012, 0x00000002, 0x00000008, 0x04000067,
0x001020f2, 0x00000000, 0x00000001, 0x02000068, 0x00000001, 0x0a0000a7, 0x00100012, 0x00000000,
0x00004001, 0x00000000, 0x00004001, 0x00000000, 0x00207006, 0x00000000, 0x00000000, 0x05000056,
0x00100022, 0x00000000, 0x0010100a, 0x00000002, 0x07000000, 0x00100012, 0x00000000, 0x0010001a,
0x00000000, 0x0010000a, 0x00000000, 0x09000000, 0x00102012, 0x00000000, 0x0010100a, 0x00000000,
0x0030800a, 0x00000001, 0x00000000, 0x0000000b, 0x09000000, 0x00102022, 0x00000000, 0x0010100a,
0x00000001, 0x0030801a, 0x00000001, 0x00000000, 0x0000000b, 0x0b000000, 0x00102042, 0x00000000,
0x0030800a, 0x00000000, 0x00000000, 0x00000000, 0x0030802a, 0x00000001, 0x00000000, 0x0000000b,
0x09000000, 0x00102082, 0x00000000, 0x0010000a, 0x00000000, 0x0030803a, 0x00000001, 0x00000000,
0x0000000b, 0x0100003e,
};
memset(&desc, 0, sizeof(desc));
desc.no_root_signature = true;
desc.no_pipeline = true;
if (!init_test_context(&context, &desc))
return;
command_list = context.list;
queue = context.queue;
for (j = 0; j < ARRAY_SIZE(ibo); j++)
for (i = 0; i < ARRAY_SIZE(ibo_data[j]); i++)
ibo_data[j][i] = j * 16 + i;
for (j = 0; j < ARRAY_SIZE(vbo); j++)
for (i = 0; i < ARRAY_SIZE(vbo_data[j]); i++)
vbo_data[j][i] = (float)(j * ARRAY_SIZE(vbo_data[j]) + i);
for (i = 0; i < ARRAY_SIZE(generic_data); i++)
generic_data[i] = (float)i;
for (i = 0; i < ARRAY_SIZE(ibo); i++)
ibo[i] = create_upload_buffer(context.device, sizeof(ibo_data[i]), ibo_data[i]);
for (i = 0; i < ARRAY_SIZE(vbo); i++)
vbo[i] = create_upload_buffer(context.device, sizeof(vbo_data[i]), vbo_data[i]);
cbv = create_upload_buffer(context.device, sizeof(generic_data), generic_data);
srv = create_upload_buffer(context.device, sizeof(generic_data), generic_data);
uav = create_default_buffer(context.device, sizeof(generic_data),
D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS,
D3D12_RESOURCE_STATE_UNORDERED_ACCESS);
indirect_vbo_data[0].view[0].BufferLocation = ID3D12Resource_GetGPUVirtualAddress(vbo[1]);
indirect_vbo_data[0].view[1].BufferLocation = ID3D12Resource_GetGPUVirtualAddress(vbo[2]);
indirect_vbo_data[1].view[0].BufferLocation = 0;
indirect_vbo_data[1].view[1].BufferLocation = ID3D12Resource_GetGPUVirtualAddress(vbo[0]) + 64;
indirect_vbo_one_data[0].view.BufferLocation = ID3D12Resource_GetGPUVirtualAddress(vbo[2]);
indirect_vbo_one_data[1].view.BufferLocation = 0;
indirect_ibo_data[1].view.BufferLocation = ID3D12Resource_GetGPUVirtualAddress(ibo[1]);
indirect_root_descriptor_data[0].cbv = ID3D12Resource_GetGPUVirtualAddress(cbv) + 256;
indirect_root_descriptor_data[0].srv = ID3D12Resource_GetGPUVirtualAddress(srv) + 8;
indirect_root_descriptor_data[0].uav = ID3D12Resource_GetGPUVirtualAddress(uav) + 4;
indirect_root_descriptor_data[1].cbv = ID3D12Resource_GetGPUVirtualAddress(cbv) + 512;
indirect_root_descriptor_data[1].srv = ID3D12Resource_GetGPUVirtualAddress(srv) + 12;
indirect_root_descriptor_data[1].uav = ID3D12Resource_GetGPUVirtualAddress(uav) + 8;
memcpy(indirect_alignment_data[0].cbv_va, &indirect_root_descriptor_data[0].cbv, sizeof(D3D12_GPU_VIRTUAL_ADDRESS));
memcpy(indirect_alignment_data[1].cbv_va, &indirect_root_descriptor_data[1].cbv, sizeof(D3D12_GPU_VIRTUAL_ADDRESS));
memset(&root_signature_desc, 0, sizeof(root_signature_desc));
root_signature_desc.Flags = D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT |
D3D12_ROOT_SIGNATURE_FLAG_ALLOW_STREAM_OUTPUT;
memset(root_parameters, 0, sizeof(root_parameters));
root_signature_desc.pParameters = root_parameters;
root_signature_desc.NumParameters = ARRAY_SIZE(root_parameters);
root_parameters[0].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
root_parameters[0].ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
root_parameters[0].Constants.RegisterSpace = 1;
root_parameters[0].Constants.Num32BitValues = 4;
root_parameters[1].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
root_parameters[1].ParameterType = D3D12_ROOT_PARAMETER_TYPE_CBV;
root_parameters[2].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
root_parameters[2].ParameterType = D3D12_ROOT_PARAMETER_TYPE_SRV;
root_parameters[3].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
root_parameters[3].ParameterType = D3D12_ROOT_PARAMETER_TYPE_UAV;
hr = create_root_signature(context.device, &root_signature_desc, &root_signatures[0]);
ok(SUCCEEDED(hr), "Failed to create root signature, hr #%x.\n", hr);
root_parameters[0].Constants.Num32BitValues = 48;
hr = create_root_signature(context.device, &root_signature_desc, &root_signatures[1]);
ok(SUCCEEDED(hr), "Failed to create root signature, hr #%x.\n", hr);
memset(so_entries, 0, sizeof(so_entries));
so_entries[0].ComponentCount = 4;
so_entries[0].SemanticName = "SV_Position";
memset(&pso_desc, 0, sizeof(pso_desc));
pso_desc.VS.pShaderBytecode = vs_code_small_cbv;
pso_desc.VS.BytecodeLength = sizeof(vs_code_small_cbv);
pso_desc.StreamOutput.NumStrides = 1;
pso_desc.StreamOutput.pBufferStrides = &so_stride;
pso_desc.StreamOutput.pSODeclaration = so_entries;
pso_desc.StreamOutput.NumEntries = ARRAY_SIZE(so_entries);
pso_desc.StreamOutput.RasterizedStream = D3D12_SO_NO_RASTERIZED_STREAM;
pso_desc.pRootSignature = root_signatures[0];
pso_desc.SampleDesc.Count = 1;
pso_desc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT;
pso_desc.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID;
pso_desc.RasterizerState.CullMode = D3D12_CULL_MODE_NONE;
pso_desc.InputLayout.NumElements = ARRAY_SIZE(layout_desc);
pso_desc.InputLayout.pInputElementDescs = layout_desc;
hr = ID3D12Device_CreateGraphicsPipelineState(context.device, &pso_desc, &IID_ID3D12PipelineState, (void**)&psos[0]);
ok(SUCCEEDED(hr), "Failed to create PSO, hr #%x.\n", hr);
pso_desc.VS.pShaderBytecode = vs_code_large_cbv;
pso_desc.VS.BytecodeLength = sizeof(vs_code_large_cbv);
pso_desc.pRootSignature = root_signatures[1];
hr = ID3D12Device_CreateGraphicsPipelineState(context.device, &pso_desc, &IID_ID3D12PipelineState, (void**)&psos[1]);
ok(SUCCEEDED(hr), "Failed to create PSO, hr #%x.\n", hr);
/* Verify sanity checks.
 * As per the validation layers, there must be exactly one draw or dispatch command
 * in the signature, and it must come last. Verify that we check for this. */
memset(&command_signature_desc, 0, sizeof(command_signature_desc));
command_signature_desc.NumArgumentDescs = 1;
command_signature_desc.pArgumentDescs = indirect_argument_descs;
command_signature_desc.ByteStride = sizeof(D3D12_VERTEX_BUFFER_VIEW);
indirect_argument_descs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_VERTEX_BUFFER_VIEW;
hr = ID3D12Device_CreateCommandSignature(context.device, &command_signature_desc, NULL,
&IID_ID3D12CommandSignature, (void**)&command_signature);
ok(hr == E_INVALIDARG, "Unexpected hr #%x.\n", hr);
command_signature_desc.NumArgumentDescs = 2;
command_signature_desc.pArgumentDescs = indirect_argument_descs;
command_signature_desc.ByteStride = sizeof(D3D12_DRAW_INDEXED_ARGUMENTS) + sizeof(D3D12_VERTEX_BUFFER_VIEW);
indirect_argument_descs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
indirect_argument_descs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_VERTEX_BUFFER_VIEW;
hr = ID3D12Device_CreateCommandSignature(context.device, &command_signature_desc, NULL,
&IID_ID3D12CommandSignature, (void**)&command_signature);
ok(hr == E_INVALIDARG, "Unexpected hr #%x.\n", hr);
command_signature_desc.ByteStride = sizeof(D3D12_DRAW_INDEXED_ARGUMENTS) + sizeof(D3D12_DRAW_INDEXED_ARGUMENTS);
indirect_argument_descs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
indirect_argument_descs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
hr = ID3D12Device_CreateCommandSignature(context.device, &command_signature_desc, NULL,
&IID_ID3D12CommandSignature, (void**)&command_signature);
ok(hr == E_INVALIDARG, "Unexpected hr #%x.\n", hr);
for (i = 0; i < ARRAY_SIZE(tests); i++)
{
struct vec4 expect_reset_state[2];
const struct vec4 *expect, *v;
uint32_t expected_output_size;
uint32_t clear_vbo_mask;
bool root_cbv;
uint32_t size;
vkd3d_test_set_context("Test %u", i);
command_signature_desc.ByteStride = tests[i].stride;
command_signature_desc.pArgumentDescs = tests[i].indirect_arguments;
command_signature_desc.NumArgumentDescs = tests[i].indirect_argument_count;
command_signature_desc.NodeMask = 0;
hr = ID3D12Device_CreateCommandSignature(context.device, &command_signature_desc,
tests[i].needs_root_sig ? root_signatures[tests[i].pso_index] : NULL,
&IID_ID3D12CommandSignature, (void**)&command_signature);
/* Updating a root CBV requires the push BDA path, which we don't enable on NV by default yet. */
root_cbv = false;
for (j = 0; j < tests[i].indirect_argument_count; j++)
{
if (tests[i].indirect_arguments[j].Type == D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT_BUFFER_VIEW)
{
root_cbv = true;
break;
}
}
if (FAILED(hr))
{
if (root_cbv && is_nvidia_device(context.device))
skip("Creating indirect root CBV update failed. If the GPU is NVIDIA, try VKD3D_CONFIG=force_raw_va_cbv.\n");
else
skip("Failed creating command signature, skipping test.\n");
continue;
}
argument_buffer = create_upload_buffer(context.device, 256 * 1024, NULL);
argument_buffer_late = create_default_buffer(context.device, 256 * 1024,
D3D12_RESOURCE_FLAG_NONE, D3D12_RESOURCE_STATE_COPY_DEST);
#define UNALIGNED_ARGUMENT_BUFFER_OFFSET (64 * 1024 + 4)
#define UNALIGNED_COUNT_BUFFER_OFFSET (128 * 1024 + 4)
#define ALIGNED_COUNT_BUFFER_OFFSET (128 * 1024 + 4 * 1024)
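/* The two UNALIGNED offsets add 4 bytes to stay 4-byte aligned while breaking
 * any larger alignment, presumably to stress offset handling; the ALIGNED
 * offset sits on a plain 4 KiB boundary for comparison. */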
{
uint8_t *ptr;
ID3D12Resource_Map(argument_buffer, 0, NULL, (void**)&ptr);
memcpy(ptr, tests[i].argument_buffer_data, tests[i].argument_buffer_size);
memcpy(ptr + UNALIGNED_ARGUMENT_BUFFER_OFFSET, tests[i].argument_buffer_data, tests[i].argument_buffer_size);
memcpy(ptr + UNALIGNED_COUNT_BUFFER_OFFSET, &tests[i].api_max_count, sizeof(tests[i].api_max_count));
memcpy(ptr + ALIGNED_COUNT_BUFFER_OFFSET, &tests[i].api_max_count, sizeof(tests[i].api_max_count));
ID3D12Resource_Unmap(argument_buffer, 0, NULL);
}
streamout_buffer = create_default_buffer(context.device, 64 * 1024,
D3D12_RESOURCE_FLAG_NONE, D3D12_RESOURCE_STATE_STREAM_OUT);
ID3D12GraphicsCommandList_SetGraphicsRootSignature(command_list, root_signatures[tests[i].pso_index]);
ID3D12GraphicsCommandList_SetPipelineState(command_list, psos[tests[i].pso_index]);
sov.SizeInBytes = 64 * 1024 - sizeof(struct vec4);
sov.BufferLocation = ID3D12Resource_GetGPUVirtualAddress(streamout_buffer) + sizeof(struct vec4);
sov.BufferFilledSizeLocation = ID3D12Resource_GetGPUVirtualAddress(streamout_buffer);
ID3D12GraphicsCommandList_SOSetTargets(command_list, 0, 1, &sov);
/* Set up default rendering state. */
ibv.BufferLocation = ID3D12Resource_GetGPUVirtualAddress(ibo[0]);
ibv.SizeInBytes = sizeof(ibo_data[0]);
ibv.Format = DXGI_FORMAT_R32_UINT;
vbvs[0].BufferLocation = ID3D12Resource_GetGPUVirtualAddress(vbo[0]);
vbvs[0].SizeInBytes = sizeof(vbo_data[0]);
vbvs[0].StrideInBytes = 4;
vbvs[1].BufferLocation = ID3D12Resource_GetGPUVirtualAddress(vbo[1]);
vbvs[1].SizeInBytes = sizeof(vbo_data[1]);
vbvs[1].StrideInBytes = 4;
ID3D12GraphicsCommandList_IASetIndexBuffer(command_list, &ibv);
ID3D12GraphicsCommandList_IASetPrimitiveTopology(command_list, D3D_PRIMITIVE_TOPOLOGY_POINTLIST);
ID3D12GraphicsCommandList_IASetVertexBuffers(command_list, 0, 2, vbvs);
for (j = 0; j < (tests[i].pso_index ? 12 : 1); j++)
ID3D12GraphicsCommandList_SetGraphicsRoot32BitConstants(command_list, 0, 4, &values, 4 * j);
ID3D12GraphicsCommandList_SetGraphicsRootConstantBufferView(command_list, 1,
ID3D12Resource_GetGPUVirtualAddress(cbv));
ID3D12GraphicsCommandList_SetGraphicsRootShaderResourceView(command_list, 2,
ID3D12Resource_GetGPUVirtualAddress(srv));
ID3D12GraphicsCommandList_SetGraphicsRootUnorderedAccessView(command_list, 3,
ID3D12Resource_GetGPUVirtualAddress(uav));
ID3D12GraphicsCommandList_ExecuteIndirect(command_list, command_signature, tests[i].api_max_count,
argument_buffer, 0, NULL, 0);
/* Test equivalent call with indirect count. */
ID3D12GraphicsCommandList_ExecuteIndirect(command_list, command_signature, 1024,
argument_buffer, UNALIGNED_ARGUMENT_BUFFER_OFFSET,
argument_buffer, UNALIGNED_COUNT_BUFFER_OFFSET);
/* Test equivalent, but now with late transition to INDIRECT. */
ID3D12GraphicsCommandList_CopyResource(command_list, argument_buffer_late, argument_buffer);
transition_resource_state(command_list, argument_buffer_late, D3D12_RESOURCE_STATE_COPY_DEST,
D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT);
ID3D12GraphicsCommandList_ExecuteIndirect(command_list, command_signature, 1024,
argument_buffer_late, 0, argument_buffer_late, ALIGNED_COUNT_BUFFER_OFFSET);
/* Root descriptors which are part of the state block are cleared to NULL. Recover them here
 * since attempting to draw in the next test would otherwise crash the GPU. */
ID3D12GraphicsCommandList_SetGraphicsRootConstantBufferView(command_list, 1,
ID3D12Resource_GetGPUVirtualAddress(cbv));
ID3D12GraphicsCommandList_SetGraphicsRootShaderResourceView(command_list, 2,
ID3D12Resource_GetGPUVirtualAddress(srv));
ID3D12GraphicsCommandList_SetGraphicsRootUnorderedAccessView(command_list, 3,
ID3D12Resource_GetGPUVirtualAddress(uav));
/* Other state is cleared to 0. */
ID3D12GraphicsCommandList_DrawInstanced(command_list, 2, 1, 0, 0);
transition_resource_state(command_list, streamout_buffer, D3D12_RESOURCE_STATE_STREAM_OUT, D3D12_RESOURCE_STATE_COPY_SOURCE);
get_buffer_readback_with_command_list(streamout_buffer, DXGI_FORMAT_R32G32B32A32_FLOAT, &rb, queue, command_list);
reset_command_list(command_list, context.allocator);
expected_output_size = (tests[i].expected_output_count * 3 + 2) * sizeof(struct vec4);
size = get_readback_uint(&rb, 0, 0, 0);
ok(size == expected_output_size, "Expected size %u, got %u.\n", expected_output_size, size);
for (j = 0; j < tests[i].expected_output_count; j++)
{
expect = &tests[i].expected_output[j];
v = get_readback_vec4(&rb, j + 1, 0);
ok(compare_vec4(v, expect, 0), "Element (direct count) %u failed: (%f, %f, %f, %f) != (%f, %f, %f, %f)\n",
j, v->x, v->y, v->z, v->w, expect->x, expect->y, expect->z, expect->w);
v = get_readback_vec4(&rb, j + tests[i].expected_output_count + 1, 0);
ok(compare_vec4(v, expect, 0), "Element (indirect count) %u failed: (%f, %f, %f, %f) != (%f, %f, %f, %f)\n",
j, v->x, v->y, v->z, v->w, expect->x, expect->y, expect->z, expect->w);
v = get_readback_vec4(&rb, j + 2 * tests[i].expected_output_count + 1, 0);
ok(compare_vec4(v, expect, 0), "Element (late latch) %u failed: (%f, %f, %f, %f) != (%f, %f, %f, %f)\n",
j, v->x, v->y, v->z, v->w, expect->x, expect->y, expect->z, expect->w);
}
clear_vbo_mask = 0;
expect_reset_state[0] = values;
/* Root constant state is cleared to zero if it's part of the signature. */
for (j = 0; j < tests[i].indirect_argument_count; j++)
{
if (tests[i].indirect_arguments[j].Type == D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT)
{
for (k = 0; k < tests[i].indirect_arguments[j].Constant.Num32BitValuesToSet; k++)
(&expect_reset_state[0].x)[(tests[i].indirect_arguments[j].Constant.DestOffsetIn32BitValues + k) % 4] = 0.0f;
}
else if (tests[i].indirect_arguments[j].Type == D3D12_INDIRECT_ARGUMENT_TYPE_VERTEX_BUFFER_VIEW)
clear_vbo_mask |= 1u << tests[i].indirect_arguments[j].VertexBuffer.Slot;
}
expect_reset_state[1] = expect_reset_state[0];
/* VBO/IBO state is cleared to zero if it's part of the signature.
 * A NULL IBO should be seen as an IBO which only ever reads index 0. */
if (!(clear_vbo_mask & (1u << 0)))
expect_reset_state[1].x += 1.0f;
if (!(clear_vbo_mask & (1u << 1)))
{
expect_reset_state[0].y += 64.0f;
expect_reset_state[1].y += 65.0f;
}
for (j = 0; j < 2; j++)
{
v = get_readback_vec4(&rb, j + 1 + 3 * tests[i].expected_output_count, 0);
expect = &expect_reset_state[j];
ok(compare_vec4(v, expect, 0), "Post-reset element %u failed: (%f, %f, %f, %f) != (%f, %f, %f, %f)\n",
j, v->x, v->y, v->z, v->w, expect->x, expect->y, expect->z, expect->w);
}
ID3D12CommandSignature_Release(command_signature);
ID3D12Resource_Release(argument_buffer);
ID3D12Resource_Release(argument_buffer_late);
ID3D12Resource_Release(streamout_buffer);
release_resource_readback(&rb);
}
vkd3d_test_set_context(NULL);
for (i = 0; i < ARRAY_SIZE(psos); i++)
ID3D12PipelineState_Release(psos[i]);
for (i = 0; i < ARRAY_SIZE(root_signatures); i++)
ID3D12RootSignature_Release(root_signatures[i]);
for (i = 0; i < ARRAY_SIZE(vbo); i++)
ID3D12Resource_Release(vbo[i]);
for (i = 0; i < ARRAY_SIZE(ibo); i++)
ID3D12Resource_Release(ibo[i]);
ID3D12Resource_Release(cbv);
ID3D12Resource_Release(srv);
ID3D12Resource_Release(uav);
destroy_test_context(&context);
}
void test_execute_indirect(void)
{
ID3D12Resource *argument_buffer, *count_buffer, *uav;
View File
@ -5293,227 +5293,3 @@ void test_root_descriptor_offset_sign(void)
destroy_test_context(&context);
#endif
}
static void test_uav_counters_null_behavior(bool use_dxil)
{
D3D12_UNORDERED_ACCESS_VIEW_DESC uav_desc;
D3D12_ROOT_SIGNATURE_DESC rs_desc;
ID3D12DescriptorHeap *cpu_heap;
D3D12_ROOT_PARAMETER rs_param;
D3D12_DESCRIPTOR_RANGE range;
struct test_context context;
struct resource_readback rb;
ID3D12DescriptorHeap *heap;
ID3D12Resource *resource;
unsigned int i;
#if 0
RWStructuredBuffer<uint> RWBuf[4] : register(u0);
[numthreads(1, 1, 1)]
void main(int wg : SV_GroupID)
{
RWBuf[wg >> 2][wg & 3] = RWBuf[wg >> 2].IncrementCounter() + 64;
}
#endif
static const DWORD cs_code_dxbc[] =
{
0x43425844, 0xb5433247, 0x4cd30f6c, 0x58100e67, 0xc179ade1, 0x00000001, 0x00000134, 0x00000003,
0x0000002c, 0x0000003c, 0x0000004c, 0x4e475349, 0x00000008, 0x00000000, 0x00000008, 0x4e47534f,
0x00000008, 0x00000000, 0x00000008, 0x58454853, 0x000000e0, 0x00050051, 0x00000038, 0x0100086a,
0x0700009e, 0x0031ee46, 0x00000000, 0x00000000, 0x00000003, 0x00000004, 0x00000000, 0x0200005f,
0x00021012, 0x02000068, 0x00000002, 0x0400009b, 0x00000001, 0x00000001, 0x00000001, 0x0600002a,
0x00100012, 0x00000000, 0x0002100a, 0x00004001, 0x00000002, 0x06000001, 0x00100022, 0x00000000,
0x0002100a, 0x00004001, 0x00000003, 0x070000b2, 0x00100012, 0x00000001, 0x0421e000, 0x00000000,
0x0010000a, 0x00000000, 0x0700001e, 0x00100042, 0x00000000, 0x0010000a, 0x00000001, 0x00004001,
0x00000040, 0x0b0000a8, 0x0421e012, 0x00000000, 0x0010000a, 0x00000000, 0x0010001a, 0x00000000,
0x00004001, 0x00000000, 0x0010002a, 0x00000000, 0x0100003e,
};
static const BYTE cs_code_dxil[] =
{
0x44, 0x58, 0x42, 0x43, 0xc6, 0xfe, 0xe1, 0x77, 0xd8, 0x5c, 0x56, 0xc7, 0x6e, 0xf7, 0xe2, 0xf7, 0xb3, 0xb0, 0x40, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x34, 0x06, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
0x38, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, 0xd0, 0x00, 0x00, 0x00, 0xec, 0x00, 0x00, 0x00, 0x53, 0x46, 0x49, 0x30, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x53, 0x47, 0x31, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x4f, 0x53, 0x47, 0x31, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x50, 0x53, 0x56, 0x30, 0x60, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x41, 0x53, 0x48, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c, 0xc1, 0xf5, 0xe2,
0x29, 0x0a, 0x7c, 0x68, 0x4a, 0xfa, 0x15, 0xe9, 0x1a, 0x85, 0x63, 0x21, 0x44, 0x58, 0x49, 0x4c, 0x40, 0x05, 0x00, 0x00, 0x60, 0x00, 0x05, 0x00, 0x50, 0x01, 0x00, 0x00, 0x44, 0x58, 0x49, 0x4c,
0x00, 0x01, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x28, 0x05, 0x00, 0x00, 0x42, 0x43, 0xc0, 0xde, 0x21, 0x0c, 0x00, 0x00, 0x47, 0x01, 0x00, 0x00, 0x0b, 0x82, 0x20, 0x00, 0x02, 0x00, 0x00, 0x00,
0x13, 0x00, 0x00, 0x00, 0x07, 0x81, 0x23, 0x91, 0x41, 0xc8, 0x04, 0x49, 0x06, 0x10, 0x32, 0x39, 0x92, 0x01, 0x84, 0x0c, 0x25, 0x05, 0x08, 0x19, 0x1e, 0x04, 0x8b, 0x62, 0x80, 0x14, 0x45, 0x02,
0x42, 0x92, 0x0b, 0x42, 0xa4, 0x10, 0x32, 0x14, 0x38, 0x08, 0x18, 0x4b, 0x0a, 0x32, 0x52, 0x88, 0x48, 0x90, 0x14, 0x20, 0x43, 0x46, 0x88, 0xa5, 0x00, 0x19, 0x32, 0x42, 0xe4, 0x48, 0x0e, 0x90,
0x91, 0x22, 0xc4, 0x50, 0x41, 0x51, 0x81, 0x8c, 0xe1, 0x83, 0xe5, 0x8a, 0x04, 0x29, 0x46, 0x06, 0x51, 0x18, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x1b, 0x8c, 0xe0, 0xff, 0xff, 0xff, 0xff, 0x07,
0x40, 0x02, 0xa8, 0x0d, 0x84, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x03, 0x20, 0x6d, 0x30, 0x86, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x09, 0xa8, 0x00, 0x49, 0x18, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
0x13, 0x82, 0x60, 0x42, 0x20, 0x4c, 0x08, 0x06, 0x00, 0x00, 0x00, 0x00, 0x89, 0x20, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x32, 0x22, 0x48, 0x09, 0x20, 0x64, 0x85, 0x04, 0x93, 0x22, 0xa4, 0x84,
0x04, 0x93, 0x22, 0xe3, 0x84, 0xa1, 0x90, 0x14, 0x12, 0x4c, 0x8a, 0x8c, 0x0b, 0x84, 0xa4, 0x4c, 0x10, 0x54, 0x23, 0x00, 0x25, 0x00, 0x14, 0xe6, 0x08, 0xc0, 0xa0, 0x0c, 0x63, 0x0c, 0x22, 0x73,
0x04, 0x08, 0x99, 0x7b, 0x86, 0xcb, 0x9f, 0xb0, 0x87, 0x90, 0xfc, 0x10, 0x68, 0x86, 0x85, 0x40, 0xc1, 0x29, 0xc4, 0x18, 0xc8, 0x50, 0x9a, 0x23, 0x08, 0x8a, 0x81, 0x86, 0x19, 0x63, 0x11, 0x2b,
0x0a, 0x18, 0x68, 0x8c, 0x31, 0xc6, 0x30, 0xe4, 0x06, 0x02, 0x66, 0x32, 0x83, 0x71, 0x60, 0x87, 0x70, 0x98, 0x87, 0x79, 0x70, 0x03, 0x59, 0xb8, 0x85, 0x59, 0xa0, 0x07, 0x79, 0xa8, 0x87, 0x71,
0xa0, 0x87, 0x7a, 0x90, 0x87, 0x72, 0x20, 0x07, 0x51, 0xa8, 0x07, 0x73, 0x30, 0x87, 0x72, 0x90, 0x07, 0x3e, 0xa8, 0x07, 0x77, 0x98, 0x87, 0x74, 0x38, 0x07, 0x77, 0x28, 0x07, 0x72, 0x00, 0x83,
0x74, 0x70, 0x07, 0x7a, 0xf0, 0x03, 0x14, 0x8c, 0x24, 0x88, 0x24, 0xe7, 0x08, 0x40, 0x01, 0x00, 0x13, 0x14, 0x72, 0xc0, 0x87, 0x74, 0x60, 0x87, 0x36, 0x68, 0x87, 0x79, 0x68, 0x03, 0x72, 0xc0,
0x87, 0x0d, 0xaf, 0x50, 0x0e, 0x6d, 0xd0, 0x0e, 0x7a, 0x50, 0x0e, 0x6d, 0x00, 0x0f, 0x7a, 0x30, 0x07, 0x72, 0xa0, 0x07, 0x73, 0x20, 0x07, 0x6d, 0x90, 0x0e, 0x71, 0xa0, 0x07, 0x73, 0x20, 0x07,
0x6d, 0x90, 0x0e, 0x78, 0xa0, 0x07, 0x73, 0x20, 0x07, 0x6d, 0x90, 0x0e, 0x71, 0x60, 0x07, 0x7a, 0x30, 0x07, 0x72, 0xd0, 0x06, 0xe9, 0x30, 0x07, 0x72, 0xa0, 0x07, 0x73, 0x20, 0x07, 0x6d, 0x90,
0x0e, 0x76, 0x40, 0x07, 0x7a, 0x60, 0x07, 0x74, 0xd0, 0x06, 0xe6, 0x10, 0x07, 0x76, 0xa0, 0x07, 0x73, 0x20, 0x07, 0x6d, 0x60, 0x0e, 0x73, 0x20, 0x07, 0x7a, 0x30, 0x07, 0x72, 0xd0, 0x06, 0xe6,
0x60, 0x07, 0x74, 0xa0, 0x07, 0x76, 0x40, 0x07, 0x6d, 0xe0, 0x0e, 0x78, 0xa0, 0x07, 0x71, 0x60, 0x07, 0x7a, 0x30, 0x07, 0x72, 0xa0, 0x07, 0x76, 0x40, 0x07, 0x43, 0x9e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0x3c, 0x04, 0x10, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x79, 0x12, 0x20, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0xf2, 0x30, 0x40, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0xe4, 0x71, 0x80, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x0b, 0x04, 0x00, 0x00,
0x09, 0x00, 0x00, 0x00, 0x32, 0x1e, 0x98, 0x10, 0x19, 0x11, 0x4c, 0x90, 0x8c, 0x09, 0x26, 0x47, 0xc6, 0x04, 0x43, 0x1a, 0x25, 0x30, 0x02, 0x50, 0x0c, 0x85, 0x50, 0x18, 0xb4, 0x46, 0x00, 0x6a,
0x80, 0x68, 0x81, 0xd0, 0x9c, 0x01, 0x00, 0x00, 0x79, 0x18, 0x00, 0x00, 0x3b, 0x00, 0x00, 0x00, 0x1a, 0x03, 0x4c, 0x90, 0x46, 0x02, 0x13, 0x44, 0x35, 0x18, 0x63, 0x0b, 0x73, 0x3b, 0x03, 0xb1,
0x2b, 0x93, 0x9b, 0x4b, 0x7b, 0x73, 0x03, 0x99, 0x71, 0xb9, 0x01, 0x41, 0xa1, 0x0b, 0x3b, 0x9b, 0x7b, 0x91, 0x2a, 0x62, 0x2a, 0x0a, 0x9a, 0x2a, 0xfa, 0x9a, 0xb9, 0x81, 0x79, 0x31, 0x4b, 0x73,
0x0b, 0x63, 0x4b, 0xd9, 0x10, 0x04, 0x13, 0x84, 0xa1, 0x98, 0x20, 0x0c, 0xc6, 0x06, 0x61, 0x20, 0x26, 0x08, 0xc3, 0xb1, 0x41, 0x18, 0x0c, 0x0a, 0x63, 0x73, 0x1b, 0x06, 0xc4, 0x20, 0x26, 0x08,
0x53, 0x43, 0x60, 0x82, 0x30, 0x20, 0x13, 0x84, 0x21, 0x99, 0x20, 0x2c, 0xca, 0x04, 0x61, 0x59, 0x36, 0x08, 0x03, 0xb3, 0x61, 0x21, 0x94, 0x85, 0x20, 0x98, 0xc6, 0x79, 0x1c, 0x68, 0x43, 0x10,
0x6d, 0x20, 0x00, 0x09, 0x00, 0x26, 0x08, 0x02, 0x40, 0xa2, 0x2d, 0x2c, 0xcd, 0x6d, 0x82, 0x40, 0x31, 0x1b, 0x86, 0x61, 0x18, 0x36, 0x10, 0x84, 0xc5, 0x5c, 0x1b, 0x0a, 0xaa, 0x02, 0x26, 0xac,
0x0a, 0x1b, 0x9b, 0x5d, 0x9b, 0x4b, 0x1a, 0x59, 0x99, 0x1b, 0xdd, 0x94, 0x20, 0xa8, 0x42, 0x86, 0xe7, 0x62, 0x57, 0x26, 0x37, 0x97, 0xf6, 0xe6, 0x36, 0x25, 0x20, 0x9a, 0x90, 0xe1, 0xb9, 0xd8,
0x85, 0xb1, 0xd9, 0x95, 0xc9, 0x4d, 0x09, 0x8c, 0x3a, 0x64, 0x78, 0x2e, 0x73, 0x68, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x53, 0x02, 0xa4, 0x0c, 0x19, 0x9e, 0x8b, 0x5c, 0xd9,
0xdc, 0x5b, 0x9d, 0xdc, 0x58, 0xd9, 0xdc, 0x94, 0x40, 0xaa, 0x43, 0x86, 0xe7, 0x52, 0xe6, 0x46, 0x27, 0x97, 0x07, 0xf5, 0x96, 0xe6, 0x46, 0x37, 0x37, 0x25, 0xc0, 0x00, 0x79, 0x18, 0x00, 0x00,
0x4c, 0x00, 0x00, 0x00, 0x33, 0x08, 0x80, 0x1c, 0xc4, 0xe1, 0x1c, 0x66, 0x14, 0x01, 0x3d, 0x88, 0x43, 0x38, 0x84, 0xc3, 0x8c, 0x42, 0x80, 0x07, 0x79, 0x78, 0x07, 0x73, 0x98, 0x71, 0x0c, 0xe6,
0x00, 0x0f, 0xed, 0x10, 0x0e, 0xf4, 0x80, 0x0e, 0x33, 0x0c, 0x42, 0x1e, 0xc2, 0xc1, 0x1d, 0xce, 0xa1, 0x1c, 0x66, 0x30, 0x05, 0x3d, 0x88, 0x43, 0x38, 0x84, 0x83, 0x1b, 0xcc, 0x03, 0x3d, 0xc8,
0x43, 0x3d, 0x8c, 0x03, 0x3d, 0xcc, 0x78, 0x8c, 0x74, 0x70, 0x07, 0x7b, 0x08, 0x07, 0x79, 0x48, 0x87, 0x70, 0x70, 0x07, 0x7a, 0x70, 0x03, 0x76, 0x78, 0x87, 0x70, 0x20, 0x87, 0x19, 0xcc, 0x11,
0x0e, 0xec, 0x90, 0x0e, 0xe1, 0x30, 0x0f, 0x6e, 0x30, 0x0f, 0xe3, 0xf0, 0x0e, 0xf0, 0x50, 0x0e, 0x33, 0x10, 0xc4, 0x1d, 0xde, 0x21, 0x1c, 0xd8, 0x21, 0x1d, 0xc2, 0x61, 0x1e, 0x66, 0x30, 0x89,
0x3b, 0xbc, 0x83, 0x3b, 0xd0, 0x43, 0x39, 0xb4, 0x03, 0x3c, 0xbc, 0x83, 0x3c, 0x84, 0x03, 0x3b, 0xcc, 0xf0, 0x14, 0x76, 0x60, 0x07, 0x7b, 0x68, 0x07, 0x37, 0x68, 0x87, 0x72, 0x68, 0x07, 0x37,
0x80, 0x87, 0x70, 0x90, 0x87, 0x70, 0x60, 0x07, 0x76, 0x28, 0x07, 0x76, 0xf8, 0x05, 0x76, 0x78, 0x87, 0x77, 0x80, 0x87, 0x5f, 0x08, 0x87, 0x71, 0x18, 0x87, 0x72, 0x98, 0x87, 0x79, 0x98, 0x81,
0x2c, 0xee, 0xf0, 0x0e, 0xee, 0xe0, 0x0e, 0xf5, 0xc0, 0x0e, 0xec, 0x30, 0x03, 0x62, 0xc8, 0xa1, 0x1c, 0xe4, 0xa1, 0x1c, 0xcc, 0xa1, 0x1c, 0xe4, 0xa1, 0x1c, 0xdc, 0x61, 0x1c, 0xca, 0x21, 0x1c,
0xc4, 0x81, 0x1d, 0xca, 0x61, 0x06, 0xd6, 0x90, 0x43, 0x39, 0xc8, 0x43, 0x39, 0x98, 0x43, 0x39, 0xc8, 0x43, 0x39, 0xb8, 0xc3, 0x38, 0x94, 0x43, 0x38, 0x88, 0x03, 0x3b, 0x94, 0xc3, 0x2f, 0xbc,
0x83, 0x3c, 0xfc, 0x82, 0x3b, 0xd4, 0x03, 0x3b, 0xb0, 0xc3, 0x0c, 0xc4, 0x21, 0x07, 0x7c, 0x70, 0x03, 0x7a, 0x28, 0x87, 0x76, 0x80, 0x87, 0x19, 0xd1, 0x43, 0x0e, 0xf8, 0xe0, 0x06, 0xe4, 0x20,
0x0e, 0xe7, 0xe0, 0x06, 0xf6, 0x10, 0x0e, 0xf2, 0xc0, 0x0e, 0xe1, 0x90, 0x0f, 0xef, 0x50, 0x0f, 0xf4, 0x00, 0x00, 0x00, 0x71, 0x20, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x46, 0x50, 0x0d, 0x97,
0xef, 0x3c, 0x7e, 0x40, 0x15, 0x05, 0x11, 0xb1, 0x93, 0x13, 0x11, 0x3e, 0x72, 0xdb, 0x26, 0x90, 0x0d, 0x97, 0xef, 0x3c, 0x7e, 0x40, 0x15, 0x05, 0x11, 0xb9, 0xcf, 0x00, 0x4c, 0x04, 0xe7, 0x50,
0xcd, 0x44, 0x44, 0x36, 0x20, 0x0d, 0x97, 0xef, 0x3c, 0xbe, 0x10, 0x11, 0xc0, 0x44, 0x84, 0x40, 0x33, 0x2c, 0x84, 0x05, 0x44, 0xc3, 0xe5, 0x3b, 0x8f, 0x6f, 0x44, 0x0e, 0xf5, 0x88, 0x83, 0x8f,
0xdc, 0xb6, 0x01, 0x10, 0x0c, 0x80, 0x34, 0x00, 0x61, 0x20, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x13, 0x04, 0x41, 0x2c, 0x10, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x34, 0x66, 0x00, 0x8a,
0x37, 0xa0, 0x08, 0x4a, 0xae, 0x18, 0x03, 0x0a, 0x30, 0xa0, 0x0c, 0x4a, 0x31, 0x80, 0x4c, 0x09, 0x00, 0x00, 0x00, 0x00, 0x23, 0x06, 0x06, 0x00, 0x82, 0x60, 0x40, 0x58, 0x48, 0x54, 0x01, 0x92,
0x15, 0x4c, 0x30, 0x62, 0x90, 0x00, 0x20, 0x08, 0x06, 0xcc, 0x95, 0x10, 0x54, 0x00, 0x8d, 0x18, 0x1c, 0x00, 0x08, 0x82, 0x41, 0x92, 0x25, 0x41, 0x51, 0x41, 0x02, 0x65, 0x24, 0x3a, 0x62, 0xd0,
0x00, 0x20, 0x08, 0x06, 0x8e, 0x96, 0x10, 0x01, 0x26, 0x40, 0x10, 0x84, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static const D3D12_SHADER_BYTECODE cs_dxbc = SHADER_BYTECODE(cs_code_dxbc);
static const D3D12_SHADER_BYTECODE cs_dxil = SHADER_BYTECODE(cs_code_dxil);
if (!init_compute_test_context(&context))
return;
if (use_dxil && !context_supports_dxil(&context))
{
skip("Context does not support DXIL.\n");
destroy_test_context(&context);
return;
}
memset(&rs_desc, 0, sizeof(rs_desc));
memset(&rs_param, 0, sizeof(rs_param));
memset(&range, 0, sizeof(range));
rs_desc.NumParameters = 1;
rs_desc.pParameters = &rs_param;
rs_param.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
rs_param.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
rs_param.DescriptorTable.NumDescriptorRanges = 1;
rs_param.DescriptorTable.pDescriptorRanges = &range;
range.RangeType = D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
range.NumDescriptors = 8;
create_root_signature(context.device, &rs_desc, &context.root_signature);
context.pipeline_state = create_compute_pipeline_state(context.device, context.root_signature, use_dxil ? cs_dxil : cs_dxbc);
cpu_heap = create_cpu_descriptor_heap(context.device, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, 8);
heap = create_gpu_descriptor_heap(context.device, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, 8);
resource = create_default_buffer(context.device, D3D12_UAV_COUNTER_PLACEMENT_ALIGNMENT * 9,
D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS, D3D12_RESOURCE_STATE_UNORDERED_ACCESS);
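/* Assumed layout under test: view i covers structured elements at bytes
 * [16 * i, 16 * i + 16), while the counters checked below live at
 * D3D12_UAV_COUNTER_PLACEMENT_ALIGNMENT * (i + 1); the 9x allocation leaves
 * room for both regions. */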
memset(&uav_desc, 0, sizeof(uav_desc));
uav_desc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
for (i = 0; i < 8; i++)
{
D3D12_CPU_DESCRIPTOR_HANDLE cpu_h, gpu_h;
cpu_h = ID3D12DescriptorHeap_GetCPUDescriptorHandleForHeapStart(cpu_heap);
gpu_h = ID3D12DescriptorHeap_GetCPUDescriptorHandleForHeapStart(heap);
cpu_h.ptr += ID3D12Device_GetDescriptorHandleIncrementSize(context.device, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV) * i;
gpu_h.ptr += ID3D12Device_GetDescriptorHandleIncrementSize(context.device, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV) * i;
uav_desc.Buffer.NumElements = 4;
uav_desc.Buffer.FirstElement = 4 * i;
uav_desc.Buffer.StructureByteStride = 4;
uav_desc.Buffer.CounterOffsetInBytes = D3D12_UAV_COUNTER_PLACEMENT_ALIGNMENT * (i + 1);
/* AMD drivers don't seem to clear the UAV counter if we pass in NULL, so
* test a path which does not do that. */
if (i < 4)
{
ID3D12Device_CreateUnorderedAccessView(context.device, resource, resource, &uav_desc, cpu_h);
ID3D12Device_CreateUnorderedAccessView(context.device, resource, resource, &uav_desc, gpu_h);
}
uav_desc.Buffer.CounterOffsetInBytes = 0;
/* Test writing a NULL UAV counter after a non-NULL UAV counter. This verifies that we are
 * indeed supposed to clear out UAV counters to NULL every time. */
if ((i & 3) == 3)
{
ID3D12Device_CreateUnorderedAccessView(context.device, NULL, NULL, &uav_desc, cpu_h);
ID3D12Device_CreateUnorderedAccessView(context.device, NULL, NULL, &uav_desc, gpu_h);
}
else if ((i & 3) >= 1)
{
ID3D12Device_CreateUnorderedAccessView(context.device, resource, NULL, &uav_desc, cpu_h);
ID3D12Device_CreateUnorderedAccessView(context.device, resource, NULL, &uav_desc, gpu_h);
}
else
{
/* Test copy behavior. Make sure we correctly copy NULL counters as well. */
ID3D12Device_CreateUnorderedAccessView(context.device, resource, NULL, &uav_desc, cpu_h);
ID3D12Device_CopyDescriptorsSimple(context.device, 1,
gpu_h, cpu_h, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
}
}
ID3D12GraphicsCommandList_SetDescriptorHeaps(context.list, 1, &heap);
ID3D12GraphicsCommandList_SetComputeRootSignature(context.list, context.root_signature);
ID3D12GraphicsCommandList_SetPipelineState(context.list, context.pipeline_state);
ID3D12GraphicsCommandList_SetComputeRootDescriptorTable(context.list, 0,
ID3D12DescriptorHeap_GetGPUDescriptorHandleForHeapStart(heap));
ID3D12GraphicsCommandList_Dispatch(context.list, 8 * 4, 1, 1);
transition_resource_state(context.list, resource, D3D12_RESOURCE_STATE_UNORDERED_ACCESS, D3D12_RESOURCE_STATE_COPY_SOURCE);
get_buffer_readback_with_command_list(resource, DXGI_FORMAT_R32_UINT, &rb, context.queue, context.list);
for (i = 0; i < 8 * 4; i++)
{
/* Behavior varies widely here:
 * NV: If the UAV counter is NULL, NV makes the main descriptor robust.
 * AMD: Writing a NULL UAV counter does not update the counter descriptor; the atomic update still goes through.
 * Intel: Behaves as you would expect. The atomic op returns 0 and writes to the main descriptor work normally. */
uint32_t value = get_readback_uint(&rb, i, 0, 0);
ok(value == 0 || (value >= 64 && value < (64 + 4)), "Unexpected value %u = %u.\n", i, value);
}
for (i = 0; i < 8; i++)
{
uint32_t value = get_readback_uint(&rb, (i + 1) * (D3D12_UAV_COUNTER_PLACEMENT_ALIGNMENT / 4), 0, 0);
if (i < 4)
{
/* AMD behavior: Passing NULL does not necessarily clear out the UAV counter.
 * It is undefined to access a UAV counter like this.
 * https://docs.microsoft.com/en-us/windows/win32/direct3d12/uav-counters
 * "If a shader attempts to access the counter of a UAV that does not have an associated counter,
 * then the debug layer will issue a warning,
 * and a GPU page fault will occur causing the app's device to be removed." */
ok(value == 0 || value == 4, "Unexpected counter %u = %u.\n", i, value);
}
else
{
/* Technically undefined, but all drivers behave robustly here; we should too. */
ok(value == 0, "Unexpected counter %u = %u.\n", i, value);
}
}
release_resource_readback(&rb);
ID3D12DescriptorHeap_Release(heap);
ID3D12DescriptorHeap_Release(cpu_heap);
ID3D12Resource_Release(resource);
destroy_test_context(&context);
}
void test_uav_counter_null_behavior_dxbc(void)
{
test_uav_counters_null_behavior(false);
}
void test_uav_counter_null_behavior_dxil(void)
{
test_uav_counters_null_behavior(true);
}
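
The per-descriptor handle arithmetic used in the test above (heap start plus increment size times index) is the standard D3D12 addressing pattern. A minimal sketch of a helper, hypothetical and not part of the test suite, assuming only the d3d12 API:

static inline D3D12_CPU_DESCRIPTOR_HANDLE offset_cpu_descriptor(ID3D12Device *device,
        ID3D12DescriptorHeap *heap, D3D12_DESCRIPTOR_HEAP_TYPE type, unsigned int index)
{
    D3D12_CPU_DESCRIPTOR_HANDLE h = ID3D12DescriptorHeap_GetCPUDescriptorHandleForHeapStart(heap);
    /* The increment size is constant per heap type for the lifetime of the device. */
    h.ptr += (SIZE_T)ID3D12Device_GetDescriptorHandleIncrementSize(device, type) * index;
    return h;
}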

File diff suppressed because it is too large

View File

@ -1467,36 +1467,3 @@ void test_missing_bindings_root_signature(void)
destroy_test_context(&context);
}
void test_root_signature_empty_blob(void)
{
ID3D12RootSignature *root_signature;
struct test_context context;
HRESULT hr;
static const DWORD cs_code[] =
{
#if 0
RWStructuredBuffer<uint> RWBuf;
[numthreads(1, 1, 1)]
void main(int wg : SV_GroupID)
{
RWBuf[wg] = wg;
}
#endif
0x43425844, 0x81a88c98, 0x1ab24abd, 0xfdb8fb1f, 0x7e9cb035, 0x00000001, 0x000000a8, 0x00000003,
0x0000002c, 0x0000003c, 0x0000004c, 0x4e475349, 0x00000008, 0x00000000, 0x00000008, 0x4e47534f,
0x00000008, 0x00000000, 0x00000008, 0x58454853, 0x00000054, 0x00050050, 0x00000015, 0x0100086a,
0x0400009e, 0x0011e000, 0x00000000, 0x00000004, 0x0200005f, 0x00021012, 0x0400009b, 0x00000001,
0x00000001, 0x00000001, 0x070000a8, 0x0011e012, 0x00000000, 0x0002100a, 0x00004001, 0x00000000,
0x0002100a, 0x0100003e,
};
if (!init_compute_test_context(&context))
return;
hr = ID3D12Device_CreateRootSignature(context.device, 0, cs_code, sizeof(cs_code), &IID_ID3D12RootSignature, (void **)&root_signature);
/* Has to be E_FAIL, not E_INVALIDARG, oddly enough. */
ok(hr == E_FAIL, "Unexpected hr #%x.\n", hr);
destroy_test_context(&context);
}
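
For contrast, the well-defined path is to hand CreateRootSignature a blob produced by D3D12SerializeRootSignature (or shader bytecode that actually contains an embedded root signature part), rather than raw compute shader bytecode as above. A minimal sketch under that assumption; `device` stands in for a valid ID3D12Device:

D3D12_ROOT_SIGNATURE_DESC desc;
ID3DBlob *blob = NULL, *error = NULL;
ID3D12RootSignature *root_signature;
HRESULT hr;

memset(&desc, 0, sizeof(desc));  /* An empty root signature is legal. */
hr = D3D12SerializeRootSignature(&desc, D3D_ROOT_SIGNATURE_VERSION_1_0, &blob, &error);
ok(hr == S_OK, "Failed to serialize root signature, hr %#x.\n", hr);
hr = ID3D12Device_CreateRootSignature(device, 0,
        ID3D10Blob_GetBufferPointer(blob), ID3D10Blob_GetBufferSize(blob),
        &IID_ID3D12RootSignature, (void **)&root_signature);
ok(hr == S_OK, "Failed to create root signature, hr %#x.\n", hr);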

View File

@ -3383,248 +3383,3 @@ void test_texture_feedback_instructions_dxil(void)
test_texture_feedback_instructions(true);
}
void test_sparse_buffer_memory_lifetime(void)
{
/* Attempt to bind sparse memory, then free the underlying heap, but keep the sparse resource
* alive. This should confuse drivers that attempt to track BO lifetimes. */
D3D12_UNORDERED_ACCESS_VIEW_DESC uav_desc;
D3D12_SHADER_RESOURCE_VIEW_DESC srv_desc;
D3D12_FEATURE_DATA_D3D12_OPTIONS options;
const UINT values[] = { 42, 42, 42, 42 };
D3D12_ROOT_PARAMETER root_parameters[2];
D3D12_TILE_REGION_SIZE region_size;
D3D12_GPU_DESCRIPTOR_HANDLE h_gpu;
D3D12_CPU_DESCRIPTOR_HANDLE h_cpu;
D3D12_ROOT_SIGNATURE_DESC rs_desc;
D3D12_DESCRIPTOR_RANGE desc_range;
struct test_context context;
struct resource_readback rb;
ID3D12DescriptorHeap *cpu;
ID3D12DescriptorHeap *gpu;
D3D12_HEAP_DESC heap_desc;
D3D12_RESOURCE_DESC desc;
ID3D12Resource *sparse;
ID3D12Resource *buffer;
ID3D12Heap *heap_live;
ID3D12Heap *heap;
unsigned int i;
HRESULT hr;
static const DWORD cs_sparse_query_dxbc[] =
{
#if 0
RWStructuredBuffer<uint> RWBuf : register(u0);
Buffer<uint> Buf : register(t0);
[numthreads(1, 1, 1)]
void main(uint thr : SV_DispatchThreadID)
{
uint code;
// Sample mapped, but freed memory. See what CheckAccessFullyMapped returns.
uint data = Buf.Load(thr, code);
uint value = CheckAccessFullyMapped(code) ? (1u << 16) : 0u;
value |= data & 0xffffu;
RWBuf[2 * thr + 0] = value;
// Sample not yet mapped memory. See what CheckAccessFullyMapped returns.
data = Buf.Load(thr + 1024 * 1024, code);
value = CheckAccessFullyMapped(code) ? (1u << 16) : 0u;
value |= data & 0xffffu;
RWBuf[2 * thr + 1] = value;
}
#endif
0x43425844, 0x8c2a40af, 0x2a9b20a6, 0xa99f0977, 0x37daacf5, 0x00000001, 0x00000280, 0x00000004,
0x00000030, 0x00000040, 0x00000050, 0x00000270, 0x4e475349, 0x00000008, 0x00000000, 0x00000008,
0x4e47534f, 0x00000008, 0x00000000, 0x00000008, 0x58454853, 0x00000218, 0x00050050, 0x00000086,
0x0100086a, 0x04000858, 0x00107000, 0x00000000, 0x00004444, 0x0400009e, 0x0011e000, 0x00000000,
0x00000004, 0x0200005f, 0x00020012, 0x02000068, 0x00000002, 0x0400009b, 0x00000001, 0x00000001,
0x00000001, 0x8a0000df, 0x80000042, 0x00111103, 0x00100012, 0x00000000, 0x00100012, 0x00000001,
0x00020006, 0x00107e46, 0x00000000, 0x050000ea, 0x00100022, 0x00000000, 0x0010000a, 0x00000001,
0x09000037, 0x00100022, 0x00000000, 0x0010001a, 0x00000000, 0x00004001, 0x00010000, 0x00004001,
0x00000000, 0x0b00008c, 0x00100012, 0x00000000, 0x00004001, 0x00000010, 0x00004001, 0x00000000,
0x0010000a, 0x00000000, 0x0010001a, 0x00000000, 0x06000029, 0x00100022, 0x00000000, 0x0002000a,
0x00004001, 0x00000001, 0x090000a8, 0x0011e012, 0x00000000, 0x0010001a, 0x00000000, 0x00004001,
0x00000000, 0x0010000a, 0x00000000, 0x1300008c, 0x00100052, 0x00000000, 0x00004002, 0x00000014,
0x00000000, 0x0000001f, 0x00000000, 0x00004002, 0x00000000, 0x00000000, 0x00000001, 0x00000000,
0x00020006, 0x00004002, 0x00100000, 0x00000000, 0x00000001, 0x00000000, 0x8b0000df, 0x80000042,
0x00111103, 0x00100012, 0x00000000, 0x00100012, 0x00000001, 0x00100006, 0x00000000, 0x00107e46,
0x00000000, 0x050000ea, 0x00100082, 0x00000000, 0x0010000a, 0x00000001, 0x09000037, 0x00100082,
0x00000000, 0x0010003a, 0x00000000, 0x00004001, 0x00010000, 0x00004001, 0x00000000, 0x0b00008c,
0x00100012, 0x00000000, 0x00004001, 0x00000010, 0x00004001, 0x00000000, 0x0010000a, 0x00000000,
0x0010003a, 0x00000000, 0x090000a8, 0x0011e012, 0x00000000, 0x0010002a, 0x00000000, 0x00004001,
0x00000000, 0x0010000a, 0x00000000, 0x0100003e, 0x30494653, 0x00000008, 0x00000100, 0x00000000,
};
static const D3D12_SHADER_BYTECODE cs_sparse_query = SHADER_BYTECODE(cs_sparse_query_dxbc);
if (!init_compute_test_context(&context))
return;
hr = ID3D12Device_CheckFeatureSupport(context.device, D3D12_FEATURE_D3D12_OPTIONS, &options, sizeof(options));
ok(hr == S_OK, "Failed to check feature support, hr %#x.\n", hr);
if (options.TiledResourcesTier < D3D12_TILED_RESOURCES_TIER_2)
{
skip("Tiled resources Tier 2 not supported by device.\n");
destroy_test_context(&context);
return;
}
memset(&rs_desc, 0, sizeof(rs_desc));
memset(root_parameters, 0, sizeof(root_parameters));
memset(&desc_range, 0, sizeof(desc_range));
rs_desc.NumParameters = ARRAY_SIZE(root_parameters);
rs_desc.pParameters = root_parameters;
root_parameters[0].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
root_parameters[0].ParameterType = D3D12_ROOT_PARAMETER_TYPE_UAV;
root_parameters[1].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
root_parameters[1].ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
root_parameters[1].DescriptorTable.NumDescriptorRanges = 1;
root_parameters[1].DescriptorTable.pDescriptorRanges = &desc_range;
desc_range.NumDescriptors = 1;
desc_range.RangeType = D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
create_root_signature(context.device, &rs_desc, &context.root_signature);
context.pipeline_state = create_compute_pipeline_state(context.device, context.root_signature, cs_sparse_query);
memset(&heap_desc, 0, sizeof(heap_desc));
heap_desc.SizeInBytes = 4 * 1024 * 1024;
heap_desc.Properties.Type = D3D12_HEAP_TYPE_DEFAULT;
heap_desc.Alignment = D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT;
heap_desc.Flags = D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
hr = ID3D12Device_CreateHeap(context.device, &heap_desc, &IID_ID3D12Heap, (void**)&heap);
ok(SUCCEEDED(hr), "Failed to create heap, hr #%x.\n", hr);
hr = ID3D12Device_CreateHeap(context.device, &heap_desc, &IID_ID3D12Heap, (void**)&heap_live);
ok(SUCCEEDED(hr), "Failed to create heap, hr #%x.\n", hr);
memset(&desc, 0, sizeof(desc));
desc.Width = 64 * 1024 * 1024;
desc.Height = 1;
desc.DepthOrArraySize = 1;
desc.SampleDesc.Count = 1;
desc.Format = DXGI_FORMAT_UNKNOWN;
desc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
desc.MipLevels = 1;
desc.Alignment = D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT;
desc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
desc.Flags = D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
hr = ID3D12Device_CreateReservedResource(context.device, &desc, D3D12_RESOURCE_STATE_UNORDERED_ACCESS,
NULL, &IID_ID3D12Resource, (void**)&sparse);
ok(SUCCEEDED(hr), "Failed to create reserved resource, hr #%x.\n", hr);
{
const D3D12_TILED_RESOURCE_COORDINATE region_start_coordinate = { 0 };
const D3D12_TILE_RANGE_FLAGS range_flag = D3D12_TILE_RANGE_FLAG_NULL;
const UINT offset = 0;
const UINT count = desc.Width / (64 * 1024);
region_size.UseBox = FALSE;
region_size.NumTiles = desc.Width / (64 * 1024);
ID3D12CommandQueue_UpdateTileMappings(context.queue, sparse, 1, &region_start_coordinate, &region_size,
NULL, 1, &range_flag, &offset, &count, D3D12_TILE_MAPPING_FLAG_NONE);
}
region_size.UseBox = FALSE;
region_size.NumTiles = 1;
for (i = 0; i < 2; i++)
{
const D3D12_TILED_RESOURCE_COORDINATE region_start_coordinate = { i, 0, 0, 0 };
const D3D12_TILE_RANGE_FLAGS range_flag = D3D12_TILE_RANGE_FLAG_NONE;
const UINT offset = i;
const UINT count = 1;
ID3D12CommandQueue_UpdateTileMappings(context.queue, sparse, 1, &region_start_coordinate, &region_size,
i == 0 ? heap : heap_live, 1, &range_flag, &offset, &count, D3D12_TILE_MAPPING_FLAG_NONE);
}
wait_queue_idle(context.device, context.queue);
buffer = create_default_buffer(context.device, 128 * 1024,
D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS, D3D12_RESOURCE_STATE_COPY_DEST);
cpu = create_cpu_descriptor_heap(context.device, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, 1);
gpu = create_gpu_descriptor_heap(context.device, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, 2);
memset(&uav_desc, 0, sizeof(uav_desc));
uav_desc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
uav_desc.Format = DXGI_FORMAT_R32_UINT;
uav_desc.Buffer.NumElements = 128 * 1024 / 4;
uav_desc.Buffer.FirstElement = 0;
ID3D12Device_CreateUnorderedAccessView(context.device, sparse, NULL, &uav_desc,
ID3D12DescriptorHeap_GetCPUDescriptorHandleForHeapStart(cpu));
ID3D12Device_CreateUnorderedAccessView(context.device, sparse, NULL, &uav_desc,
ID3D12DescriptorHeap_GetCPUDescriptorHandleForHeapStart(gpu));
memset(&srv_desc, 0, sizeof(srv_desc));
srv_desc.Buffer.FirstElement = 0;
srv_desc.Buffer.NumElements = 2 * 1024 * 1024;
srv_desc.ViewDimension = D3D12_SRV_DIMENSION_BUFFER;
srv_desc.Format = DXGI_FORMAT_R32_UINT;
srv_desc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
h_cpu = ID3D12DescriptorHeap_GetCPUDescriptorHandleForHeapStart(gpu);
h_cpu.ptr += ID3D12Device_GetDescriptorHandleIncrementSize(context.device, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
ID3D12Device_CreateShaderResourceView(context.device, sparse, &srv_desc, h_cpu);
ID3D12GraphicsCommandList_SetDescriptorHeaps(context.list, 1, &gpu);
ID3D12GraphicsCommandList_ClearUnorderedAccessViewUint(context.list,
ID3D12DescriptorHeap_GetGPUDescriptorHandleForHeapStart(gpu),
ID3D12DescriptorHeap_GetCPUDescriptorHandleForHeapStart(cpu), sparse, values, 0, NULL);
transition_resource_state(context.list, sparse,
D3D12_RESOURCE_STATE_UNORDERED_ACCESS, D3D12_RESOURCE_STATE_COPY_SOURCE);
ID3D12GraphicsCommandList_CopyBufferRegion(context.list, buffer, 0, sparse, 0, 128 * 1024);
transition_resource_state(context.list, buffer,
D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_COPY_SOURCE);
get_buffer_readback_with_command_list(buffer, DXGI_FORMAT_R32_UINT,
&rb, context.queue, context.list);
reset_command_list(context.list, context.allocator);
ok(get_readback_uint(&rb, 0, 0, 0) == 42, "Got #%x, expected 42.\n", get_readback_uint(&rb, 0, 0, 0));
ok(get_readback_uint(&rb, 64 * 1024 / 4, 0, 0) == 42, "Got #%x, expected 42.\n", get_readback_uint(&rb, 64 * 1024 / 4, 0, 0));
release_resource_readback(&rb);
ID3D12Heap_Release(heap);
/* Access a resource where we can hypothetically access the freed heap memory. */
/* On native AMD Windows at least, reading the freed region returns garbage rather than 0,
 * which proves that explicit unbinding is not required; if it were, we would read 0. */
ID3D12GraphicsCommandList_CopyBufferRegion(context.list, buffer, 0, sparse, 64 * 1024, 64 * 1024);
#define EXPLORE_UNDEFINED_BEHAVIOR 0
#if EXPLORE_UNDEFINED_BEHAVIOR
/* This reads unmapped memory. */
ID3D12GraphicsCommandList_CopyBufferRegion(context.list, buffer, 1024, sparse, 1024, 1024);
#endif
transition_resource_state(context.list, buffer, D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_UNORDERED_ACCESS);
h_gpu = ID3D12DescriptorHeap_GetGPUDescriptorHandleForHeapStart(gpu);
h_gpu.ptr += ID3D12Device_GetDescriptorHandleIncrementSize(context.device, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
ID3D12GraphicsCommandList_SetDescriptorHeaps(context.list, 1, &gpu);
ID3D12GraphicsCommandList_SetComputeRootSignature(context.list, context.root_signature);
ID3D12GraphicsCommandList_SetPipelineState(context.list, context.pipeline_state);
ID3D12GraphicsCommandList_SetComputeRootUnorderedAccessView(context.list, 0, ID3D12Resource_GetGPUVirtualAddress(buffer));
ID3D12GraphicsCommandList_SetComputeRootDescriptorTable(context.list, 1, h_gpu);
#if EXPLORE_UNDEFINED_BEHAVIOR
ID3D12GraphicsCommandList_Dispatch(context.list, 1, 1, 1);
#endif
transition_resource_state(context.list, buffer, D3D12_RESOURCE_STATE_UNORDERED_ACCESS, D3D12_RESOURCE_STATE_COPY_SOURCE);
get_buffer_readback_with_command_list(buffer, DXGI_FORMAT_R32_UINT,
&rb, context.queue, context.list);
#if EXPLORE_UNDEFINED_BEHAVIOR
skip("Reading undefined value #%x.\n", get_readback_uint(&rb, 0, 0, 0));
skip("Reading value #%x (expect 0).\n", get_readback_uint(&rb, 1, 0, 0));
skip("Reading undefined value #%x.\n", get_readback_uint(&rb, 1024 / 4, 0, 0));
#endif
ok(get_readback_uint(&rb, 2048 / 4, 0, 0) == 42, "Got #%x, expected 42.\n", get_readback_uint(&rb, 2048 / 4, 0, 0));
ok(get_readback_uint(&rb, 64 * 1024 / 4, 0, 0) == 42, "Got #%x, expected 42.\n", get_readback_uint(&rb, 64 * 1024 / 4, 0, 0));
release_resource_readback(&rb);
ID3D12Resource_Release(buffer);
ID3D12Resource_Release(sparse);
ID3D12DescriptorHeap_Release(cpu);
ID3D12DescriptorHeap_Release(gpu);
ID3D12Heap_Release(heap_live);
destroy_test_context(&context);
}
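
For reference, the well-defined teardown the test deliberately skips would NULL-map the region before releasing its backing heap, so no view can observe freed memory. A sketch under the same assumptions as the test (`device`, `queue`, `sparse` and `heap` as above, before the early heap release):

const D3D12_TILED_RESOURCE_COORDINATE coord = { 0 };
const D3D12_TILE_RANGE_FLAGS flag = D3D12_TILE_RANGE_FLAG_NULL;
const UINT offset = 0;
const UINT count = 1;
D3D12_TILE_REGION_SIZE size;

size.UseBox = FALSE;
size.NumTiles = 1;
/* Unbind tile 0 from its heap; the mapping update executes on the queue timeline. */
ID3D12CommandQueue_UpdateTileMappings(queue, sparse, 1, &coord, &size,
        NULL, 1, &flag, &offset, &count, D3D12_TILE_MAPPING_FLAG_NONE);
wait_queue_idle(device, queue);  /* Only now is it safe to release the heap. */
ID3D12Heap_Release(heap);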

View File

@ -1187,247 +1187,3 @@ void test_create_fence(void)
ok(!refcount, "ID3D12Device has %u references left.\n", (unsigned int)refcount);
}
void test_fence_wait_robustness_inner(bool shared_handles)
{
VKD3D_UNUSED HANDLE shared_signal = NULL;
VKD3D_UNUSED HANDLE shared_drain = NULL;
VKD3D_UNUSED HANDLE shared_wait = NULL;
ID3D12CommandAllocator *allocator[2];
ID3D12Fence *signal_fence_dup = NULL;
D3D12_COMMAND_QUEUE_DESC queue_desc;
ID3D12Fence *drain_fence_dup = NULL;
ID3D12Fence *wait_fence_dup = NULL;
ID3D12GraphicsCommandList *list[2];
ID3D12CommandQueue *compute_queue;
struct test_context context;
ID3D12Fence *signal_fence;
ID3D12Fence *drain_fence;
ID3D12Fence *wait_fence;
ID3D12Resource *src;
ID3D12Resource *dst;
unsigned int i;
HANDLE event;
UINT value;
HRESULT hr;
if (!init_compute_test_context(&context))
return;
hr = ID3D12Device_CreateFence(context.device, 0,
shared_handles ? D3D12_FENCE_FLAG_SHARED : D3D12_FENCE_FLAG_NONE,
&IID_ID3D12Fence, (void**)&signal_fence);
todo_if(shared_handles) ok(SUCCEEDED(hr), "Failed to create fence, hr #%x.\n", hr);
if (FAILED(hr))
{
skip("Failed to create fence, skipping test ...\n");
destroy_test_context(&context);
return;
}
hr = ID3D12Device_CreateFence(context.device, 0,
shared_handles ? D3D12_FENCE_FLAG_SHARED : D3D12_FENCE_FLAG_NONE,
&IID_ID3D12Fence, (void**)&wait_fence);
ok(SUCCEEDED(hr), "Failed to create fence, hr #%x.\n", hr);
if (FAILED(hr))
{
skip("Failed to create fence, skipping test ...\n");
ID3D12Fence_Release(signal_fence);
destroy_test_context(&context);
return;
}
hr = ID3D12Device_CreateFence(context.device, 0,
shared_handles ? D3D12_FENCE_FLAG_SHARED : D3D12_FENCE_FLAG_NONE,
&IID_ID3D12Fence, (void**)&drain_fence);
ok(SUCCEEDED(hr), "Failed to create fence, hr #%x.\n", hr);
if (FAILED(hr))
{
skip("Failed to create fence, skipping test ...\n");
ID3D12Fence_Release(signal_fence);
ID3D12Fence_Release(wait_fence);
destroy_test_context(&context);
return;
}
#ifdef _WIN32
if (shared_handles)
{
hr = ID3D12Device_CreateSharedHandle(context.device, (ID3D12DeviceChild*)signal_fence,
NULL, GENERIC_ALL, NULL, &shared_signal);
ok(SUCCEEDED(hr), "Failed to create shared handle, hr #%x.\n", hr);
hr = ID3D12Device_CreateSharedHandle(context.device, (ID3D12DeviceChild*)wait_fence,
NULL, GENERIC_ALL, NULL, &shared_wait);
ok(SUCCEEDED(hr), "Failed to create shared handle, hr #%x.\n", hr);
hr = ID3D12Device_CreateSharedHandle(context.device, (ID3D12DeviceChild*)drain_fence,
NULL, GENERIC_ALL, NULL, &shared_drain);
ok(SUCCEEDED(hr), "Failed to create shared handle, hr #%x.\n", hr);
ID3D12Fence_Release(signal_fence);
ID3D12Fence_Release(wait_fence);
ID3D12Fence_Release(drain_fence);
hr = ID3D12Device_OpenSharedHandle(context.device, shared_signal, &IID_ID3D12Fence, (void**)&signal_fence);
ok(SUCCEEDED(hr), "Failed to open shared handle, hr #%x.\n", hr);
hr = ID3D12Device_OpenSharedHandle(context.device, shared_wait, &IID_ID3D12Fence, (void**)&wait_fence);
ok(SUCCEEDED(hr), "Failed to open shared handle, hr #%x.\n", hr);
hr = ID3D12Device_OpenSharedHandle(context.device, shared_drain, &IID_ID3D12Fence, (void**)&drain_fence);
ok(SUCCEEDED(hr), "Failed to open shared handle, hr #%x.\n", hr);
/* OpenSharedHandle takes a kernel level reference on the HANDLE. */
hr = ID3D12Device_OpenSharedHandle(context.device, shared_signal, &IID_ID3D12Fence, (void**)&signal_fence_dup);
ok(SUCCEEDED(hr), "Failed to open shared handle, hr #%x.\n", hr);
hr = ID3D12Device_OpenSharedHandle(context.device, shared_wait, &IID_ID3D12Fence, (void**)&wait_fence_dup);
ok(SUCCEEDED(hr), "Failed to open shared handle, hr #%x.\n", hr);
hr = ID3D12Device_OpenSharedHandle(context.device, shared_drain, &IID_ID3D12Fence, (void**)&drain_fence_dup);
ok(SUCCEEDED(hr), "Failed to open shared handle, hr #%x.\n", hr);
/* Observed behavior: Closing the last reference to the kernel HANDLE object unblocks all waiters.
 * This isn't really implementable in Wine as it stands, since applications are free to share
 * the HANDLE and duplicate it arbitrarily.
 * For now, assume this is not a thing; we can report TDR-like situations if this comes up in practice. */
if (shared_signal)
CloseHandle(shared_signal);
if (shared_wait)
CloseHandle(shared_wait);
if (shared_drain)
CloseHandle(shared_drain);
}
#endif
memset(&queue_desc, 0, sizeof(queue_desc));
queue_desc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
queue_desc.Priority = D3D12_COMMAND_QUEUE_PRIORITY_NORMAL;
queue_desc.Type = D3D12_COMMAND_LIST_TYPE_COMPUTE;
src = create_default_buffer(context.device, 256 * 1024 * 1024, D3D12_RESOURCE_FLAG_NONE, D3D12_RESOURCE_STATE_COPY_SOURCE);
dst = create_default_buffer(context.device, 256 * 1024 * 1024, D3D12_RESOURCE_FLAG_NONE, D3D12_RESOURCE_STATE_COPY_DEST);
ID3D12Device_CreateCommandQueue(context.device, &queue_desc, &IID_ID3D12CommandQueue, (void**)&compute_queue);
for (i = 0; i < 2; i++)
{
ID3D12Device_CreateCommandAllocator(context.device, D3D12_COMMAND_LIST_TYPE_COMPUTE,
&IID_ID3D12CommandAllocator, (void**)&allocator[i]);
ID3D12Device_CreateCommandList(context.device, 0, D3D12_COMMAND_LIST_TYPE_COMPUTE, allocator[i], NULL,
&IID_ID3D12GraphicsCommandList, (void**)&list[i]);
}
/* Heavy copy action. */
for (i = 0; i < 128; i++)
{
ID3D12GraphicsCommandList_CopyResource(list[0], dst, src);
ID3D12GraphicsCommandList_CopyResource(list[1], src, dst);
}
ID3D12GraphicsCommandList_Close(list[0]);
ID3D12GraphicsCommandList_Close(list[1]);
/* Note on ref-count checks: the debug layers seem to take transient public ref-counts. */
ID3D12CommandQueue_ExecuteCommandLists(context.queue, 1, (ID3D12CommandList * const *)&list[0]);
ID3D12CommandQueue_Signal(context.queue, signal_fence, 1);
/* Validate that signal/wait does not take public ref-counts. */
value = get_refcount(signal_fence);
ok(value == 1, "Unexpected ref-count %u\n", value);
/* The queued copies amount to 32 GB of bandwidth (128 iterations over a 256 MB buffer). There is literally zero chance they would have completed in this amount of time. */
value = (UINT)ID3D12Fence_GetCompletedValue(signal_fence);
ok(value == 0, "Unexpected signal event %u.\n", value);
/* Try waiting for a signal that never comes. We'll be able to unblock this wait
* when we fully release the fence. */
ID3D12CommandQueue_Wait(compute_queue, signal_fence, UINT64_MAX);
value = get_refcount(signal_fence);
ok(value == 1, "Unexpected ref-count %u\n", value);
ID3D12CommandQueue_Signal(compute_queue, wait_fence, 1);
value = get_refcount(wait_fence);
ok(value == 1, "Unexpected ref-count %u\n", value);
/* The queued copies amount to 32 GB of bandwidth. There is literally zero chance they would have completed in this amount of time. */
value = (UINT)ID3D12Fence_GetCompletedValue(wait_fence);
ok(value == 0, "Unexpected signal event %u.\n", value);
value = (UINT)ID3D12Fence_GetCompletedValue(signal_fence);
ok(value == 0, "Unexpected signal event %u.\n", value);
ID3D12CommandQueue_Wait(compute_queue, wait_fence, 1);
value = get_refcount(wait_fence);
ok(value == 1, "Unexpected ref-count %u\n", value);
/* Check that we can queue up event completion.
* Again, verify that releasing the fence unblocks all waiters ... */
event = create_event();
ID3D12Fence_SetEventOnCompletion(signal_fence, UINT64_MAX, event);
if (signal_fence_dup)
ID3D12Fence_Release(signal_fence_dup);
if (wait_fence_dup)
ID3D12Fence_Release(wait_fence_dup);
/* The queued copies amount to 32 GB of bandwidth; there is literally zero chance they would have completed in this amount of time.
 * This makes sure that the fences aren't signalled when we try to free them.
 * (Sure, there is a theoretical race condition if the GPU completes between this check and the release, but seriously ...). */
value = (UINT)ID3D12Fence_GetCompletedValue(signal_fence);
ok(value == 0, "Unexpected signal event %u.\n", value);
value = (UINT)ID3D12Fence_GetCompletedValue(wait_fence);
ok(value == 0, "Unexpected signal event %u.\n", value);
/* Test that it's valid to release a fence while it's in flight.
 * If we don't cause device loss and the drain_fence wait completes successfully, we pass the test. */
value = ID3D12Fence_Release(signal_fence);
ok(value == 0, "Unexpected fence ref-count %u.\n", value);
value = ID3D12Fence_Release(wait_fence);
ok(value == 0, "Unexpected fence ref-count %u.\n", value);
ID3D12CommandQueue_ExecuteCommandLists(compute_queue, 1, (ID3D12CommandList * const *)&list[1]);
ID3D12CommandQueue_Signal(compute_queue, drain_fence, 1);
wait_event(event, INFINITE);
destroy_event(event);
ID3D12Fence_SetEventOnCompletion(drain_fence, 1, NULL);
value = (UINT)ID3D12Fence_GetCompletedValue(drain_fence);
ok(value == 1, "Expected fence wait value 1, but got %u.\n", value);
if (drain_fence_dup)
{
/* Check we observe the counter in sibling fences as well. */
value = (UINT)ID3D12Fence_GetCompletedValue(drain_fence_dup);
ok(value == 1, "Expected fence wait value 1, but got %u.\n", value);
ID3D12Fence_Release(drain_fence_dup);
}
value = ID3D12Fence_Release(drain_fence);
ok(value == 0, "Unexpected fence ref-count %u.\n", value);
/* Early freeing of fences might signal the drain fence too early, causing a GPU hang. */
wait_queue_idle(context.device, context.queue);
wait_queue_idle(context.device, compute_queue);
ID3D12CommandQueue_Release(compute_queue);
for (i = 0; i < 2; i++)
{
ID3D12CommandAllocator_Release(allocator[i]);
ID3D12GraphicsCommandList_Release(list[i]);
}
ID3D12Resource_Release(dst);
ID3D12Resource_Release(src);
destroy_test_context(&context);
}
void test_fence_wait_robustness(void)
{
test_fence_wait_robustness_inner(false);
}
void test_fence_wait_robustness_shared(void)
{
#ifdef _WIN32
test_fence_wait_robustness_inner(true);
#else
skip("Shared fences not supported on native Linux build.\n");
#endif
}
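
For reference, the well-defined CPU-side wait pattern whose edge cases the test above stresses; a minimal sketch assuming a valid `device` and `queue` and the suite's event helpers:

ID3D12Fence *fence;
HANDLE event;
HRESULT hr;

hr = ID3D12Device_CreateFence(device, 0, D3D12_FENCE_FLAG_NONE,
        &IID_ID3D12Fence, (void **)&fence);
ok(hr == S_OK, "Failed to create fence, hr %#x.\n", hr);
ID3D12CommandQueue_Signal(queue, fence, 1);
if (ID3D12Fence_GetCompletedValue(fence) < 1)
{
    event = create_event();
    ID3D12Fence_SetEventOnCompletion(fence, 1, event);
    wait_event(event, INFINITE);
    destroy_event(event);
}
/* Release only after the wait has completed; releasing an in-flight fence is
 * exactly the edge case the test above exercises. */
ID3D12Fence_Release(fence);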

View File

@ -27,10 +27,6 @@ PFN_D3D12_GET_DEBUG_INTERFACE pfn_D3D12GetDebugInterface;
const char *vkd3d_test_platform = "other";
struct vkd3d_test_state_context vkd3d_test_state;
#ifdef _WIN32
RENDERDOC_API_1_0_0 *renderdoc_api;
#endif
bool compare_float(float f, float g, int ulps)
{
int x, y;
@ -865,7 +861,6 @@ ID3D12CommandSignature *create_command_signature_(unsigned int line,
bool init_compute_test_context_(unsigned int line, struct test_context *context)
{
D3D12_COMMAND_LIST_TYPE command_list_type = D3D12_COMMAND_LIST_TYPE_COMPUTE;
ID3D12Device *device;
HRESULT hr;
@ -878,21 +873,14 @@ bool init_compute_test_context_(unsigned int line, struct test_context *context)
}
device = context->device;
#ifdef _WIN32
begin_renderdoc_capturing(device);
/* Work around a RenderDoc bug: it expects a DIRECT command queue to exist. */
if (renderdoc_api)
command_list_type = D3D12_COMMAND_LIST_TYPE_DIRECT;
#endif
context->queue = create_command_queue_(line, device,
command_list_type, D3D12_COMMAND_QUEUE_PRIORITY_NORMAL);
D3D12_COMMAND_LIST_TYPE_COMPUTE, D3D12_COMMAND_QUEUE_PRIORITY_NORMAL);
hr = ID3D12Device_CreateCommandAllocator(device, command_list_type,
hr = ID3D12Device_CreateCommandAllocator(device, D3D12_COMMAND_LIST_TYPE_COMPUTE,
&IID_ID3D12CommandAllocator, (void **)&context->allocator);
ok_(line)(hr == S_OK, "Failed to create command allocator, hr %#x.\n", hr);
hr = ID3D12Device_CreateCommandList(device, 0, command_list_type,
hr = ID3D12Device_CreateCommandList(device, 0, D3D12_COMMAND_LIST_TYPE_COMPUTE,
context->allocator, NULL, &IID_ID3D12GraphicsCommandList, (void **)&context->list);
ok_(line)(hr == S_OK, "Failed to create command list, hr %#x.\n", hr);

View File

@ -19,10 +19,6 @@
#ifndef __VKD3D_D3D12_TEST_UTILS_H
#define __VKD3D_D3D12_TEST_UTILS_H
#ifdef _WIN32
#include "renderdoc_app.h"
#endif
#define SHADER_BYTECODE(code) {code,sizeof(code)}
#define wait_queue_idle(a, b) wait_queue_idle_(__LINE__, a, b)
@ -1053,45 +1049,6 @@ static inline void create_render_target_(unsigned int line, struct test_context
ID3D12Device_CreateRenderTargetView(context->device, *render_target, NULL, *rtv);
}
/* Utility code for capturing native D3D12 tests, which is why this only covers Win32.
* Launch the d3d12.exe test binary from RenderDoc UI.
* For Vulkan capturing, use VKD3D_AUTO_CAPTURE_COUNTS and friends instead. */
#ifdef _WIN32
extern RENDERDOC_API_1_0_0 *renderdoc_api;
static inline void begin_renderdoc_capturing(ID3D12Device *device)
{
pRENDERDOC_GetAPI get_api;
HANDLE renderdoc;
FARPROC fn_ptr;
if (!renderdoc_api)
{
renderdoc = GetModuleHandleA("renderdoc.dll");
if (renderdoc)
{
fn_ptr = GetProcAddress(renderdoc, "RENDERDOC_GetAPI");
if (fn_ptr)
{
/* Work around compiler warnings about casting to a function pointer. */
memcpy(&get_api, &fn_ptr, sizeof(fn_ptr));
if (!get_api(eRENDERDOC_API_Version_1_0_0, (void **)&renderdoc_api))
renderdoc_api = NULL;
}
}
}
if (renderdoc_api)
renderdoc_api->StartFrameCapture(device, NULL);
}
static inline void end_renderdoc_capturing(ID3D12Device *device)
{
if (renderdoc_api)
renderdoc_api->EndFrameCapture(device, NULL);
}
#endif
#define init_test_context(context, desc) init_test_context_(__LINE__, context, desc)
static inline bool init_test_context_(unsigned int line, struct test_context *context,
const struct test_context_desc *desc)
@ -1109,10 +1066,6 @@ static inline bool init_test_context_(unsigned int line, struct test_context *co
}
device = context->device;
#ifdef _WIN32
begin_renderdoc_capturing(device);
#endif
context->queue = create_command_queue_(line, device, D3D12_COMMAND_LIST_TYPE_DIRECT, D3D12_COMMAND_QUEUE_PRIORITY_NORMAL);
hr = ID3D12Device_CreateCommandAllocator(device, D3D12_COMMAND_LIST_TYPE_DIRECT,
@ -1164,10 +1117,6 @@ static inline void destroy_test_context_(unsigned int line, struct test_context
{
ULONG refcount;
#ifdef _WIN32
end_renderdoc_capturing(context->device);
#endif
if (context->pipeline_state)
ID3D12PipelineState_Release(context->pipeline_state);
if (context->root_signature)

View File

@ -123,8 +123,6 @@ decl_test(test_tgsm);
decl_test(test_uav_load);
decl_test(test_cs_uav_store);
decl_test(test_uav_counters);
decl_test(test_uav_counter_null_behavior_dxbc);
decl_test(test_uav_counter_null_behavior_dxil);
decl_test(test_decrement_uav_counter);
decl_test(test_atomic_instructions_dxbc);
decl_test(test_atomic_instructions_dxil);
@ -137,7 +135,6 @@ decl_test(test_resolve_non_issued_query_data);
decl_test(test_resolve_query_data_in_different_command_list);
decl_test(test_resolve_query_data_in_reordered_command_list);
decl_test(test_execute_indirect);
decl_test(test_execute_indirect_state);
decl_test(test_dispatch_zero_thread_groups);
decl_test(test_unaligned_vertex_stride);
decl_test(test_zero_vertex_stride);
@ -297,7 +294,6 @@ decl_test(test_integer_blending_pipeline_state);
decl_test(test_discard_resource_uav);
decl_test(test_unbound_rtv_rendering);
decl_test(test_raytracing_local_rs_static_sampler);
decl_test(test_raytracing_local_rs_static_sampler_collection);
decl_test(test_rayquery);
decl_test(test_typed_srv_uav_cast);
decl_test(test_typed_srv_cast_clear);
@ -311,13 +307,3 @@ decl_test(test_shader_waveop_maximal_convergence);
decl_test(test_uav_3d_sliced_view);
decl_test(test_pipeline_no_ps_nonzero_rts);
decl_test(test_root_descriptor_offset_sign);
decl_test(test_raytracing_no_global_root_signature);
decl_test(test_raytracing_missing_required_objects);
decl_test(test_raytracing_reject_duplicate_objects);
decl_test(test_raytracing_embedded_subobjects);
decl_test(test_raytracing_default_association_tiebreak);
decl_test(test_raytracing_collection_identifiers);
decl_test(test_fence_wait_robustness);
decl_test(test_fence_wait_robustness_shared);
decl_test(test_root_signature_empty_blob);
decl_test(test_sparse_buffer_memory_lifetime);