Compare commits


4 Commits

Author SHA1 Message Date
CamilleLaVey
4c478e07b2 [spv, qcom] Implement warp intrinsics support
2025-12-24 16:41:49 -03:00
Caio Oliveira
00ec67d65b [android] Properly set Root of the Project (#3177)
Signed-off-by: Caio Oliveira <caiooliveirafarias0@gmail.com>
Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3177
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
Reviewed-by: Lizzie <lizzie@eden-emu.dev>
Co-authored-by: Caio Oliveira <caiooliveirafarias0@gmail.com>
Co-committed-by: Caio Oliveira <caiooliveirafarias0@gmail.com>
2025-12-23 22:23:56 +01:00
Marcin Serwin
fbd28a9d34 [cmake] fix dynarmic tests (#3192)
The old-style `add_test` invocation is not target-aware, which means that
trying to run the test via `ninja test` results in:

    Could not find executable dynarmic_tests

Signed-off-by: Marcin Serwin <marcin@serwin.dev>

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3192
Co-authored-by: Marcin Serwin <marcin@serwin.dev>
Co-committed-by: Marcin Serwin <marcin@serwin.dev>
2025-12-23 01:34:58 +01:00
Caio Oliveira
3413fbd9da [FIXUP] Partially revert "[NCE] Fix cache invalidation and signal interrupt race condition (#3063)" (#3190)
* This fixes Jamboree and SSB

This reverts commit e3c942b209.

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3190
Reviewed-by: Maufeat <sahyno1996@gmail.com>
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
Co-authored-by: Caio Oliveira <caiooliveirafarias0@gmail.com>
Co-committed-by: Caio Oliveira <caiooliveirafarias0@gmail.com>
2025-12-22 02:58:40 +01:00
13 changed files with 224 additions and 32 deletions

View File

@@ -18,6 +18,7 @@ plugins {
id("androidx.navigation.safeargs.kotlin")
id("org.jlleitschuh.gradle.ktlint") version "11.4.0"
id("com.github.triplet.play") version "3.8.6"
id("idea")
}
/**
@@ -27,6 +28,8 @@ plugins {
*/
val autoVersion = (((System.currentTimeMillis() / 1000) - 1451606400) / 10).toInt()
val edenDir = project(":Eden").projectDir
@Suppress("UnstableApiUsage")
android {
namespace = "org.yuzu.yuzu_emu"
@@ -241,11 +244,17 @@ android {
externalNativeBuild {
cmake {
version = "3.22.1"
path = file("../../../CMakeLists.txt")
path = file("${edenDir}/CMakeLists.txt")
}
}
}
idea {
module {
// The idea plugin is applied above so the build/ dir can be excluded from non-Android IDE indexing
excludeDirs.add(file("${edenDir}/build"))
}
}
tasks.register<Delete>("ktlintReset", fun Delete.() {
delete(File(layout.buildDirectory.toString() + File.separator + "intermediates/ktLint"))
@@ -346,7 +355,7 @@ fun getGitVersion(): String {
}
afterEvaluate {
val artifactsDir = layout.projectDirectory.dir("../../../artifacts")
val artifactsDir = layout.projectDirectory.dir("${edenDir}/artifacts")
val outputsDir = layout.buildDirectory.dir("outputs").get()
android.applicationVariants.forEach { variant ->

View File

@@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
@@ -19,3 +22,6 @@ dependencyResolutionManagement {
}
include(":app")
include("Eden")
project(":Eden").projectDir = file("../..")

View File

@@ -391,28 +391,15 @@ const std::size_t CACHE_PAGE_SIZE = 4096;
void ArmNce::ClearInstructionCache() {
#ifdef __aarch64__
// Use IC IALLU to actually invalidate L1 instruction cache
// Ensure all previous memory operations complete
asm volatile("dsb ish\n"
"ic iallu\n"
"dsb ish\n"
"isb" ::: "memory");
#endif
}
void ArmNce::InvalidateCacheRange(u64 addr, std::size_t size) {
#ifdef ARCHITECTURE_arm64
// Invalidate instruction cache for specific range instead of full flush
constexpr u64 cache_line_size = 64;
const u64 aligned_addr = addr & ~(cache_line_size - 1);
const u64 end_addr = (addr + size + cache_line_size - 1) & ~(cache_line_size - 1);
asm volatile("dsb ish" ::: "memory");
for (u64 i = aligned_addr; i < end_addr; i += cache_line_size) {
asm volatile("ic ivau, %0" :: "r"(i) : "memory");
}
asm volatile("dsb ish\n"
"isb" ::: "memory");
#endif
this->ClearInstructionCache();
}
} // namespace Core

View File

@@ -134,4 +134,4 @@ target_include_directories(dynarmic_tests PRIVATE . ../src)
target_compile_options(dynarmic_tests PRIVATE ${DYNARMIC_CXX_FLAGS})
target_compile_definitions(dynarmic_tests PRIVATE FMT_USE_USER_DEFINED_LITERALS=1)
add_test(dynarmic_tests dynarmic_tests --durations yes)
add_test(NAME dynarmic_tests COMMAND dynarmic_tests --durations yes)

View File

@@ -11,6 +11,7 @@
#include <vector>
#include <spirv-tools/optimizer.hpp>
#include "common/logging/log.h"
#include "common/settings.h"
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
@@ -439,15 +440,23 @@ void SetupCapabilities(const Profile& profile, const Info& info, EmitContext& ct
ctx.AddExtension("SPV_KHR_shader_draw_parameters");
ctx.AddCapability(spv::Capability::DrawParameters);
}
if ((info.uses_subgroup_vote || info.uses_subgroup_invocation_id ||
info.uses_subgroup_shuffles) &&
profile.support_vote) {
const bool stage_supports_warp = profile.SupportsWarpIntrinsics(ctx.stage);
const bool needs_warp_intrinsics = info.uses_subgroup_vote ||
info.uses_subgroup_invocation_id ||
info.uses_subgroup_shuffles;
if (needs_warp_intrinsics && profile.support_vote && stage_supports_warp) {
ctx.AddCapability(spv::Capability::GroupNonUniformBallot);
ctx.AddCapability(spv::Capability::GroupNonUniformShuffle);
if (!profile.warp_size_potentially_larger_than_guest) {
// vote ops are only used when not taking the long path
ctx.AddCapability(spv::Capability::GroupNonUniformVote);
}
} else if (needs_warp_intrinsics && !stage_supports_warp) {
LOG_WARNING(Shader,
"Warp intrinsics requested in stage {} but the device does not report subgroup "
"support; falling back to scalar approximations",
static_cast<u32>(ctx.stage));
}
if (info.uses_int64_bit_atomics && profile.support_int64_atomics) {
ctx.AddCapability(spv::Capability::Int64Atomics);

View File

@@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -78,9 +81,25 @@ Id AddPartitionBase(EmitContext& ctx, Id thread_id) {
const Id partition_base{ctx.OpShiftLeftLogical(ctx.U32[1], partition_idx, ctx.Const(5u))};
return ctx.OpIAdd(ctx.U32[1], thread_id, partition_base);
}
bool SupportsWarpIntrinsics(const EmitContext& ctx) {
return ctx.profile.SupportsWarpIntrinsics(ctx.stage);
}
void SetAlwaysInBounds(EmitContext& ctx, IR::Inst* inst) {
SetInBoundsFlag(inst, ctx.true_value);
}
Id FallbackBallotMask(EmitContext& ctx, Id pred) {
const Id full_mask{ctx.Const(0xFFFFFFFFu)};
return ctx.OpSelect(ctx.U32[1], pred, full_mask, ctx.u32_zero_value);
}
} // Anonymous namespace
Id EmitLaneId(EmitContext& ctx) {
if (!SupportsWarpIntrinsics(ctx)) {
return ctx.u32_zero_value;
}
const Id id{GetThreadId(ctx)};
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
return id;
@@ -89,6 +108,9 @@ Id EmitLaneId(EmitContext& ctx) {
}
Id EmitVoteAll(EmitContext& ctx, Id pred) {
if (!SupportsWarpIntrinsics(ctx)) {
return pred;
}
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
return ctx.OpGroupNonUniformAll(ctx.U1, SubgroupScope(ctx), pred);
}
@@ -102,6 +124,9 @@ Id EmitVoteAll(EmitContext& ctx, Id pred) {
}
Id EmitVoteAny(EmitContext& ctx, Id pred) {
if (!SupportsWarpIntrinsics(ctx)) {
return pred;
}
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
return ctx.OpGroupNonUniformAny(ctx.U1, SubgroupScope(ctx), pred);
}
@@ -115,6 +140,9 @@ Id EmitVoteAny(EmitContext& ctx, Id pred) {
}
Id EmitVoteEqual(EmitContext& ctx, Id pred) {
if (!SupportsWarpIntrinsics(ctx)) {
return pred;
}
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
return ctx.OpGroupNonUniformAllEqual(ctx.U1, SubgroupScope(ctx), pred);
}
@@ -129,6 +157,9 @@ Id EmitVoteEqual(EmitContext& ctx, Id pred) {
}
Id EmitSubgroupBallot(EmitContext& ctx, Id pred) {
if (!SupportsWarpIntrinsics(ctx)) {
return FallbackBallotMask(ctx, pred);
}
const Id ballot{ctx.OpGroupNonUniformBallot(ctx.U32[4], SubgroupScope(ctx), pred)};
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
return ctx.OpCompositeExtract(ctx.U32[1], ballot, 0U);
@@ -137,27 +168,46 @@ Id EmitSubgroupBallot(EmitContext& ctx, Id pred) {
}
Id EmitSubgroupEqMask(EmitContext& ctx) {
if (!SupportsWarpIntrinsics(ctx)) {
return ctx.u32_zero_value;
}
return LoadMask(ctx, ctx.subgroup_mask_eq);
}
Id EmitSubgroupLtMask(EmitContext& ctx) {
if (!SupportsWarpIntrinsics(ctx)) {
return ctx.u32_zero_value;
}
return LoadMask(ctx, ctx.subgroup_mask_lt);
}
Id EmitSubgroupLeMask(EmitContext& ctx) {
if (!SupportsWarpIntrinsics(ctx)) {
return ctx.u32_zero_value;
}
return LoadMask(ctx, ctx.subgroup_mask_le);
}
Id EmitSubgroupGtMask(EmitContext& ctx) {
if (!SupportsWarpIntrinsics(ctx)) {
return ctx.u32_zero_value;
}
return LoadMask(ctx, ctx.subgroup_mask_gt);
}
Id EmitSubgroupGeMask(EmitContext& ctx) {
if (!SupportsWarpIntrinsics(ctx)) {
return ctx.u32_zero_value;
}
return LoadMask(ctx, ctx.subgroup_mask_ge);
}
Id EmitShuffleIndex(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
Id segmentation_mask) {
if (!SupportsWarpIntrinsics(ctx)) {
SetAlwaysInBounds(ctx, inst);
return value;
}
const Id not_seg_mask{ctx.OpNot(ctx.U32[1], segmentation_mask)};
const Id thread_id{EmitLaneId(ctx)};
const Id min_thread_id{ComputeMinThreadId(ctx, thread_id, segmentation_mask)};
@@ -177,6 +227,10 @@ Id EmitShuffleIndex(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id cla
Id EmitShuffleUp(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
Id segmentation_mask) {
if (!SupportsWarpIntrinsics(ctx)) {
SetAlwaysInBounds(ctx, inst);
return value;
}
const Id thread_id{EmitLaneId(ctx)};
const Id max_thread_id{GetMaxThreadId(ctx, thread_id, clamp, segmentation_mask)};
Id src_thread_id{ctx.OpISub(ctx.U32[1], thread_id, index)};
@@ -192,6 +246,10 @@ Id EmitShuffleUp(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
Id EmitShuffleDown(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
Id segmentation_mask) {
if (!SupportsWarpIntrinsics(ctx)) {
SetAlwaysInBounds(ctx, inst);
return value;
}
const Id thread_id{EmitLaneId(ctx)};
const Id max_thread_id{GetMaxThreadId(ctx, thread_id, clamp, segmentation_mask)};
Id src_thread_id{ctx.OpIAdd(ctx.U32[1], thread_id, index)};
@@ -207,6 +265,10 @@ Id EmitShuffleDown(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clam
Id EmitShuffleButterfly(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
Id segmentation_mask) {
if (!SupportsWarpIntrinsics(ctx)) {
SetAlwaysInBounds(ctx, inst);
return value;
}
const Id thread_id{EmitLaneId(ctx)};
const Id max_thread_id{GetMaxThreadId(ctx, thread_id, clamp, segmentation_mask)};
Id src_thread_id{ctx.OpBitwiseXor(ctx.U32[1], thread_id, index)};

View File

@@ -1416,6 +1416,7 @@ void EmitContext::DefineImages(const Info& info, u32& binding, u32& scaling_inde
void EmitContext::DefineInputs(const IR::Program& program) {
const Info& info{program.info};
const VaryingState loads{info.loads.mask | info.passthrough.mask};
const bool stage_supports_warp = profile.SupportsWarpIntrinsics(stage);
if (info.uses_workgroup_id) {
workgroup_id = DefineInput(*this, U32[3], false, spv::BuiltIn::WorkgroupId);
@@ -1436,16 +1437,17 @@ void EmitContext::DefineInputs(const IR::Program& program) {
if (info.uses_is_helper_invocation) {
is_helper_invocation = DefineInput(*this, U1, false, spv::BuiltIn::HelperInvocation);
}
if (info.uses_subgroup_mask) {
if (info.uses_subgroup_mask && stage_supports_warp) {
subgroup_mask_eq = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupEqMaskKHR);
subgroup_mask_lt = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupLtMaskKHR);
subgroup_mask_le = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupLeMaskKHR);
subgroup_mask_gt = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupGtMaskKHR);
subgroup_mask_ge = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupGeMaskKHR);
}
if (info.uses_fswzadd || info.uses_subgroup_invocation_id || info.uses_subgroup_shuffles ||
(profile.warp_size_potentially_larger_than_guest &&
(info.uses_subgroup_vote || info.uses_subgroup_mask))) {
if (stage_supports_warp &&
(info.uses_fswzadd || info.uses_subgroup_invocation_id || info.uses_subgroup_shuffles ||
(profile.warp_size_potentially_larger_than_guest &&
(info.uses_subgroup_vote || info.uses_subgroup_mask)))) {
AddCapability(spv::Capability::GroupNonUniform);
subgroup_local_invocation_id =
DefineInput(*this, U32[1], false, spv::BuiltIn::SubgroupLocalInvocationId);

View File

@@ -1,9 +1,15 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <limits>
#include "common/common_types.h"
#include "shader_recompiler/stage.h"
namespace Shader {
@@ -46,6 +52,8 @@ struct Profile {
bool support_multi_viewport{};
bool support_geometry_streams{};
u32 warp_stage_support_mask{std::numeric_limits<u32>::max()};
bool warp_size_potentially_larger_than_guest{};
bool lower_left_origin_mode{};
@@ -90,6 +98,11 @@ struct Profile {
u64 min_ssbo_alignment{};
u32 max_user_clip_distances{};
bool SupportsWarpIntrinsics(Stage stage) const {
const u32 bit = 1u << static_cast<u32>(stage);
return (warp_stage_support_mask & bit) != 0;
}
};
} // namespace Shader
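
The new `warp_stage_support_mask` defaults to all-ones, so frontends that never populate it, and the OpenGL backend, which assigns `0xFFFFFFFFu` explicitly, keep warp intrinsics enabled for every stage; the Vulkan backend clears it and sets one bit per stage the driver actually supports. A minimal sketch of the bitmask idiom, with a hypothetical stage enum standing in for `Shader::Stage` (the real enumerator values are not assumed here):

```cpp
#include <cstdint>
#include <initializer_list>
#include <limits>

// Hypothetical stand-in for Shader::Stage; only "small consecutive values" matters.
enum class Stage : std::uint32_t { Vertex, TessControl, TessEval, Geometry, Fragment, Compute };

struct WarpStageMask {
    // All-ones by default: no restriction, i.e. the behaviour before this change.
    std::uint32_t mask{std::numeric_limits<std::uint32_t>::max()};

    void EnableOnly(std::initializer_list<Stage> stages) {
        mask = 0;
        for (const Stage s : stages) {
            mask |= 1u << static_cast<std::uint32_t>(s);
        }
    }
    bool Supports(Stage s) const {
        return (mask & (1u << static_cast<std::uint32_t>(s))) != 0;
    }
};
```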

View File

@@ -220,6 +220,7 @@ ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
.support_gl_sparse_textures = device.HasSparseTexture2(),
.support_gl_derivative_control = device.HasDerivativeControl(),
.support_geometry_streams = true,
.warp_stage_support_mask = 0xFFFFFFFFu,
.warp_size_potentially_larger_than_guest = device.IsWarpSizePotentiallyLargerThanGuest(),

View File

@@ -459,10 +459,14 @@ QueriesPrefixScanPass::QueriesPrefixScanPass(
device_, descriptor_pool_, QUERIES_SCAN_DESCRIPTOR_SET_BINDINGS,
QUERIES_SCAN_DESCRIPTOR_UPDATE_TEMPLATE, QUERIES_SCAN_BANK_INFO,
COMPUTE_PUSH_CONSTANT_RANGE<sizeof(QueriesPrefixScanPushConstants)>,
device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_BASIC_BIT) &&
device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_ARITHMETIC_BIT) &&
device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_SHUFFLE_BIT) &&
device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT)
device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_BASIC_BIT,
VK_SHADER_STAGE_COMPUTE_BIT) &&
device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_ARITHMETIC_BIT,
VK_SHADER_STAGE_COMPUTE_BIT) &&
device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_SHUFFLE_BIT,
VK_SHADER_STAGE_COMPUTE_BIT) &&
device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT,
VK_SHADER_STAGE_COMPUTE_BIT)
? std::span<const u32>(QUERIES_PREFIX_SCAN_SUM_COMP_SPV)
: std::span<const u32>(QUERIES_PREFIX_SCAN_SUM_NOSUBGROUPS_COMP_SPV)),
scheduler{scheduler_}, compute_pass_descriptor_queue{compute_pass_descriptor_queue_} {}

View File

@@ -105,6 +105,26 @@ Shader::CompareFunction MaxwellToCompareFunction(Maxwell::ComparisonOp compariso
return {};
}
VkShaderStageFlagBits StageToVkStage(Shader::Stage stage) {
switch (stage) {
case Shader::Stage::VertexA:
case Shader::Stage::VertexB:
return VK_SHADER_STAGE_VERTEX_BIT;
case Shader::Stage::TessellationControl:
return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
case Shader::Stage::TessellationEval:
return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
case Shader::Stage::Geometry:
return VK_SHADER_STAGE_GEOMETRY_BIT;
case Shader::Stage::Fragment:
return VK_SHADER_STAGE_FRAGMENT_BIT;
case Shader::Stage::Compute:
return VK_SHADER_STAGE_COMPUTE_BIT;
default:
return VK_SHADER_STAGE_VERTEX_BIT;
}
}
Shader::AttributeType CastAttributeType(const FixedPipelineState::VertexAttribute& attr) {
if (attr.enabled == 0) {
return Shader::AttributeType::Disabled;
@@ -395,6 +415,27 @@ PipelineCache::PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
.support_conditional_barrier = device.SupportsConditionalBarriers(),
};
profile.warp_stage_support_mask = 0;
static constexpr std::array kAllStages{
Shader::Stage::VertexA, Shader::Stage::VertexB,
Shader::Stage::TessellationControl, Shader::Stage::TessellationEval,
Shader::Stage::Geometry, Shader::Stage::Fragment,
Shader::Stage::Compute,
};
for (const auto stage : kAllStages) {
const auto vk_stage = StageToVkStage(stage);
if (device.SupportsWarpIntrinsics(vk_stage)) {
profile.warp_stage_support_mask |= 1u << static_cast<u32>(stage);
}
}
profile.support_vote = profile.warp_stage_support_mask != 0;
if (!profile.SupportsWarpIntrinsics(Shader::Stage::Fragment)) {
LOG_WARNING(Render_Vulkan,
"Fragment shaders lack subgroup support on this driver; warp intrinsics will be "
"approximated and visual artifacts may remain");
}
if (device.GetMaxVertexInputAttributes() < Maxwell::NumVertexAttributes) {
LOG_WARNING(Render_Vulkan, "maxVertexInputAttributes is too low: {} < {}",
device.GetMaxVertexInputAttributes(), Maxwell::NumVertexAttributes);

View File

@@ -92,6 +92,11 @@ constexpr std::array VK_FORMAT_A4B4G4R4_UNORM_PACK16{
} // namespace Alternatives
constexpr VkShaderStageFlags GraphicsStageMask =
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | VK_SHADER_STAGE_GEOMETRY_BIT |
VK_SHADER_STAGE_FRAGMENT_BIT;
template <typename T>
void SetNext(void**& next, T& data) {
*next = &data;
@@ -1238,6 +1243,44 @@ void Device::RemoveUnsuitableExtensions() {
VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_EXTENSION_NAME);
}
bool Device::SupportsSubgroupStage(VkShaderStageFlags stage_mask) const {
if (stage_mask == 0) {
return true;
}
const VkShaderStageFlags supported = properties.subgroup_properties.supportedStages;
if ((supported & stage_mask) == stage_mask) {
return true;
}
if ((stage_mask & GraphicsStageMask) != 0 &&
(supported & (VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_ALL)) != 0) {
return true;
}
if ((stage_mask & VK_SHADER_STAGE_COMPUTE_BIT) != 0 &&
(supported & (VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_ALL)) != 0) {
return true;
}
return (supported & VK_SHADER_STAGE_ALL) != 0;
}
bool Device::IsSubgroupFeatureSupported(VkSubgroupFeatureFlagBits feature,
VkShaderStageFlags stage_mask) const {
if ((properties.subgroup_properties.supportedOperations & feature) == 0) {
return false;
}
return SupportsSubgroupStage(stage_mask);
}
bool Device::SupportsWarpIntrinsics(VkShaderStageFlagBits stage) const {
constexpr VkSubgroupFeatureFlags required_ops =
VK_SUBGROUP_FEATURE_BASIC_BIT | VK_SUBGROUP_FEATURE_VOTE_BIT |
VK_SUBGROUP_FEATURE_ARITHMETIC_BIT | VK_SUBGROUP_FEATURE_BALLOT_BIT |
VK_SUBGROUP_FEATURE_SHUFFLE_BIT | VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT;
if ((properties.subgroup_properties.supportedOperations & required_ops) != required_ops) {
return false;
}
return SupportsSubgroupStage(stage);
}
void Device::SetupFamilies(VkSurfaceKHR surface) {
const std::vector queue_family_properties = physical.GetQueueFamilyProperties();
std::optional<u32> graphics;
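
The two helpers compose so that `SupportsSubgroupStage(0)` is always true: a call without a stage mask therefore keeps the operations-only meaning of the old single-argument `IsSubgroupFeatureSupported`, while passing a stage mask additionally requires the driver to advertise that stage in `supportedStages`. A usage sketch, illustration only (the `Vulkan::Device` spelling and header path are assumptions about the surrounding tree, not part of this change):

```cpp
#include <vulkan/vulkan.h>

// Assumed header path for the Device class shown in the next file.
#include "video_core/vulkan_common/vulkan_device.h"

bool PreferSubgroupPrefixScan(const Vulkan::Device& device) {
    // stage_mask defaults to 0, which skips the stage check: this matches the
    // behaviour of the previous single-argument overload.
    if (!device.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_ARITHMETIC_BIT)) {
        return false;
    }
    // The stricter form also requires VK_SHADER_STAGE_COMPUTE_BIT to appear in
    // VkPhysicalDeviceSubgroupProperties::supportedStages.
    return device.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_ARITHMETIC_BIT,
                                             VK_SHADER_STAGE_COMPUTE_BIT);
}
```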

View File

@@ -360,11 +360,26 @@ public:
return properties.subgroup_size_control.requiredSubgroupSizeStages & stage;
}
/// Returns true if the device supports the provided subgroup feature.
bool IsSubgroupFeatureSupported(VkSubgroupFeatureFlagBits feature) const {
return properties.subgroup_properties.supportedOperations & feature;
/// Returns true if the device supports the provided subgroup feature for the given stages.
bool IsSubgroupFeatureSupported(VkSubgroupFeatureFlagBits feature,
VkShaderStageFlags stage_mask = 0) const;
/// Returns true if the device reports subgroup support for the provided shader stages.
bool SupportsSubgroupStage(VkShaderStageFlags stage_mask) const;
/// Returns the set of stages that report subgroup support.
VkShaderStageFlags GetSubgroupSupportedStages() const {
return properties.subgroup_properties.supportedStages;
}
/// Returns the set of subgroup operations reported by the driver.
VkSubgroupFeatureFlags GetSubgroupSupportedOperations() const {
return properties.subgroup_properties.supportedOperations;
}
/// Returns true if the driver can execute all warp intrinsics for the given shader stage.
bool SupportsWarpIntrinsics(VkShaderStageFlagBits stage) const;
/// Returns the maximum number of push descriptors.
u32 MaxPushDescriptors() const {
return properties.push_descriptor.maxPushDescriptors;