Compare commits

..

69 Commits

Author SHA1 Message Date
CamilleLaVey
04c29645a2 Fix building 2025-12-25 23:57:02 -03:00
CamilleLaVey
5666401685 Warning fix 2025-12-25 23:57:01 -03:00
CamilleLaVey
be6e577bc2 [vk, pipeline, query_cache, rasterizer] transformFeedback buffering handling update 2025-12-25 23:57:01 -03:00
Caio Oliveira
875829f8fc Partially reverts "[vk] Changing conditions for Swapchain maintenance1 "
This reverts commit 3fc3b5fdad.
2025-12-25 23:57:01 -03:00
CamilleLaVey
fec98b8913 [revert] Resolving conflicting changes 2025-12-25 23:57:01 -03:00
CamilleLaVey
e6ec42ee9d Revert "[vk] Ensure image view flags are resolved" 2025-12-25 23:57:01 -03:00
CamilleLaVey
bc70f1b32d Revert "[vk, texture_cache] Preventing ARGB8 from being misinterpreted as depth formats" 2025-12-25 23:57:01 -03:00
CamilleLaVey
94674c9143 Revert "[vk, texture_cache] BGRA8 Depth/Stencil format conversions" 2025-12-25 23:57:00 -03:00
CamilleLaVey
287dcd0bcf Revert "[maxwell] Logging for wrong HDR conversions into depth formats"
This reverts commit 66c26e39fe.
2025-12-25 23:57:00 -03:00
CamilleLaVey
cb19579b44 Revert "[surface, vk, pipeline, texture_cache] Refactor image view handling and add normalized compatible format utility"
This reverts commit 6a230bec1a.
2025-12-25 23:57:00 -03:00
CamilleLaVey
dd4deb9796 Revert "[spir-v] Add is_integer flag to texture descriptors and update image type handling" 2025-12-25 23:57:00 -03:00
CamilleLaVey
38caca49c6 Revert "[vk, pipeline, texture_cache] Renamed MakeView parameter" 2025-12-25 23:57:00 -03:00
CamilleLaVey
5a5d4b549a [vk] Gating advanced ExtendedDynamicState1 2025-12-25 23:57:00 -03:00
CamilleLaVey
2add5905b5 [licences] Updating licenses on missing files 2025-12-25 23:57:00 -03:00
CamilleLaVey
b517cfcbf3 [vk] Line rasterization and Alpha features adjustments (again) 2025-12-25 23:56:59 -03:00
CamilleLaVey
79b78de780 [vk, scheduler] Applying finishing call for TF when it's not being used 2025-12-25 23:56:59 -03:00
CamilleLaVey
352da451c0 [vk, qcom] Returning forced SScaled and UScaled format emulations to Adreno. 2025-12-25 23:56:59 -03:00
CamilleLaVey
8ffdf69831 [vk, vendor] Forcing BGR5 emulation path due to driver misbehavior. 2025-12-25 23:56:59 -03:00
CamilleLaVey
0e4cd4ecd9 [vk] ExtendedDynamicState repair #2 2025-12-25 23:56:59 -03:00
CamilleLaVey
5919f2d860 [vk] Depth State Refresh Update. 2025-12-25 23:56:59 -03:00
CamilleLaVey
cd2f57d1e2 [vk, compute_pass] Conditioning Conditional Rendering 2025-12-25 23:56:58 -03:00
CamilleLaVey
c441d1883b [spir-v, emit] Flat Decoration Adjustment 2025-12-25 23:56:58 -03:00
CamilleLaVey
e56accf146 [spir-v, emit] SPV Image Mismatch 2025-12-25 23:56:58 -03:00
CamilleLaVey
cf37a68e07 [vk, rasterizer] Clamping Render-Area out of limits 2025-12-25 23:56:58 -03:00
CamilleLaVey
a2892fccdb [vk, rasterizer, state_tracker] LineMode disabled from scheduler 2025-12-25 23:56:58 -03:00
CamilleLaVey
c4b0d116e9 [surface, vk, pipeline, texture_cache] Texture Sampling Fix 2025-12-25 23:56:58 -03:00
CamilleLaVey
58fb3487d1 [vk, swapchain] Swapchain Image VkQueue 2025-12-25 23:56:58 -03:00
CamilleLaVey
7d8c1baa87 [vk, graphics, pipeline, rasterizer] Alpha Coverage Adjustment 2025-12-25 23:56:57 -03:00
CamilleLaVey
ac23e3100f [vk, pipeline, texture_cache] Renamed MakeView parameter 2025-12-25 23:56:57 -03:00
CamilleLaVey
14d3d5814b [spir-v] Add is_integer flag to texture descriptors and update image type handling 2025-12-25 23:56:57 -03:00
CamilleLaVey
04e0b3147b [surface, vk, pipeline, texture_cache] Refactor image view handling and add normalized compatible format utility 2025-12-25 23:56:57 -03:00
CamilleLaVey
cb3495621e [vk] Removing false remove feature logging for robustness2 and image robustness. 2025-12-25 23:56:57 -03:00
CamilleLaVey
fbb513c83d [vk] ExtendedDynamicState repair #1 2025-12-25 23:56:57 -03:00
CamilleLaVey
8dfc42b45a [spir-v] Flat decorations for input interfaces 2025-12-25 23:56:56 -03:00
CamilleLaVey
977904cd27 [vk] VK_EXT_multi_draw 2025-12-25 23:56:54 -03:00
CamilleLaVey
0c26513484 [vk] Declaring features from Maintenance5 2025-12-25 23:53:26 -03:00
CamilleLaVey
2fb238ab56 [vk] Fixing logging statements 2025-12-25 23:53:26 -03:00
CamilleLaVey
2209efb618 [vk] Removing Image Robustness from EXT list. 2025-12-25 23:53:26 -03:00
CamilleLaVey
7d3380b38d [vk] ExtendedDynamicState impl close to Vulkan specs 2025-12-25 23:53:26 -03:00
CamilleLaVey
d161d5e7a2 [vk, rasterizer] Reduce FlushWork constant drawcalls 2025-12-25 23:53:26 -03:00
CamilleLaVey
2aa0bc9f0f [vk] Moving Maintenance features to wrapper 2025-12-25 23:53:26 -03:00
CamilleLaVey
07cf4451c2 [vk] Re-ordering tiling format features 2025-12-25 23:53:25 -03:00
CamilleLaVey
2e8e808958 [vk] Re-ordering format feature 2025-12-25 23:53:25 -03:00
CamilleLaVey
6aa581abeb [vk] Robustness2 and Image Robustness 2025-12-25 23:53:25 -03:00
CamilleLaVey
f9043627be [maxwell] Logging for wrong HDR conversions into depth formats 2025-12-25 23:53:25 -03:00
CamilleLaVey
1ddad7c59a [vk, texture_cache] BGRA8 Depth/Stencil format conversions 2025-12-25 23:53:25 -03:00
CamilleLaVey
5cea80588b [vk, texture_cache] Preventing ARGB8 from being misinterpreted as depth formats 2025-12-25 23:53:25 -03:00
CamilleLaVey
df34184914 [vk] Adjusting Custom Border Color 2025-12-25 23:53:24 -03:00
CamilleLaVey
c78ad84d1e [vk] Adjusting VIDS 2025-12-25 23:53:24 -03:00
CamilleLaVey
3fe0f1829a [vk] Changing conditions for Swapchain maintenance1 2025-12-25 23:53:24 -03:00
CamilleLaVey
63527a377b [vk] Ensure image view flags are resolved 2025-12-25 23:53:24 -03:00
CamilleLaVey
c58d42a5e5 [vk] Aligning ExtendedDynamicState2 2025-12-25 23:53:24 -03:00
CamilleLaVey
eb3b4a9242 [vk, spir-v] Conditioning creation of VK_EXT_Shader_Stencil_Export in SPIR-V 2025-12-25 23:53:24 -03:00
CamilleLaVey
03c9adf480 Fixing build error. 2025-12-25 23:53:24 -03:00
CamilleLaVey
66e97f718d [vk, texture_cache, vendor] Adding path for hardware resolve on shader stencil export/ MSAA image blits 2025-12-25 23:53:23 -03:00
CamilleLaVey
7ba5a3d6a0 [vk] Return VK 1.3 as main target, treat VK 1.4 core features as extensions if driver supports it 2025-12-25 23:53:23 -03:00
CamilleLaVey
4c223fdd11 [vk] Ordering doubly specified cases and allocating them in the correct place in the GetSuitability phase 2025-12-25 23:53:23 -03:00
CamilleLaVey
b7fcb84749 Damned macros. 2025-12-25 23:53:23 -03:00
CamilleLaVey
607dd6adac Fix building issues 2025-12-25 23:53:23 -03:00
CamilleLaVey
8663923709 [vk, qcom] VertexInputDynamicState ban removal 2025-12-25 23:53:23 -03:00
CamilleLaVey
273a0788be [vk] Bumping features to 1.4 2025-12-25 23:53:22 -03:00
CamilleLaVey
1cc6a3c5b3 [vk] Updated maintenance features 2025-12-25 23:53:22 -03:00
CamilleLaVey
2b059c5584 [vk, amd, qcom] Removed older driver workarounds 2025-12-25 23:53:22 -03:00
CamilleLaVey
ba5bb52c11 [vk, spir-v] Adding decoration for NonWritable buffers if vertexPipelineStoresAndAtomics isn't available 2025-12-25 23:53:22 -03:00
CamilleLaVey
a65552af38 [vk, buffer_cache] Aligning VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT logic 2025-12-25 23:53:22 -03:00
CamilleLaVey
50e85e232e [vk, vendor] Adding driver id flag to blacklist 2025-12-25 23:53:22 -03:00
CamilleLaVey
1bc297dc70 [Refactor, vk] DynamicState, ExtendedDynamicState and VertexInputDynamicState 2025-12-25 23:53:21 -03:00
Caio Oliveira
d25f9501e8 Revert "[vk] attempt to continue even if unsuitable driver (#3087)"
This reverts commit 7a98ee4ead.
2025-12-25 23:53:21 -03:00
Caio Oliveira
8db7de42a4 Revert "[video_core] Fix inconsistency between EDS and VIDS settings (#3148)"
This reverts commit 7157d5167e.
2025-12-25 23:53:21 -03:00
59 changed files with 1230 additions and 1245 deletions

View File

@@ -17,6 +17,8 @@ add_library(core STATIC
constants.h
core.cpp
core.h
game_settings.cpp
game_settings.h
core_timing.cpp
core_timing.h
cpu_manager.cpp
@@ -43,11 +45,7 @@ add_library(core STATIC
device_memory.cpp
device_memory.h
device_memory_manager.h
device_memory.h
device_memory_manager.h
device_memory_manager.inc
internal_network/legacy_online.cpp
internal_network/legacy_online.h
file_sys/bis_factory.cpp
file_sys/bis_factory.h
file_sys/card_image.cpp

View File

@@ -49,7 +49,6 @@
#include "core/hle/service/services.h"
#include "core/hle/service/set/system_settings_server.h"
#include "core/hle/service/sm/sm.h"
#include "core/internal_network/legacy_online.h"
#include "core/internal_network/network.h"
#include "core/loader/loader.h"
#include "core/memory.h"
@@ -138,13 +137,6 @@ struct System::Impl {
kernel.SetMulticore(is_multicore);
cpu_manager.SetMulticore(is_multicore);
cpu_manager.SetAsyncGpu(is_async_gpu);
cpu_manager.SetMulticore(is_multicore);
cpu_manager.SetAsyncGpu(is_async_gpu);
if (!legacy_online) {
legacy_online = std::make_unique<Network::LegacyOnlineService>();
legacy_online->Start();
}
}
void ReinitializeIfNecessary(System& system) {
@@ -301,48 +293,6 @@ struct System::Impl {
return SystemResultStatus::Success;
}
void LoadOverrides(u64 programId) const {
std::string vendor = gpu_core->Renderer().GetDeviceVendor();
LOG_INFO(Core, "GPU Vendor: {}", vendor);
// Reset all per-game flags
Settings::values.use_squashed_iterated_blend = false;
// Insert PC overrides here
#ifdef ANDROID
// Example on how to set a setting based on the program ID and vendor
if (programId == 0x010028600EBDA000 && vendor == "Mali") { // Mario 3d World
// Settings::values.example = true;
}
// Example array of program IDs
const std::array<u64, 10> example_array = {
//0xprogramId
0x0004000000033400, // Game 1
0x0004000000033500 // Game 2
// And so on
};
for (auto id : example_array) {
if (programId == id) {
// Settings::values.example = true;
break;
}
}
#endif
// Ninja Gaiden Ragebound
constexpr u64 ngr = 0x0100781020710000ULL;
if (programId == ngr) {
LOG_INFO(Core, "Enabling game specific override: use_squashed_iterated_blend");
Settings::values.use_squashed_iterated_blend = true;
}
}
SystemResultStatus Load(System& system, Frontend::EmuWindow& emu_window,
const std::string& filepath,
Service::AM::FrontendAppletParameters& params) {
@@ -428,7 +378,8 @@ struct System::Impl {
LOG_ERROR(Core, "Failed to find program id for ROM");
}
LoadOverrides(program_id);
GameSettings::LoadOverrides(program_id, gpu_core->Renderer());
if (auto room_member = Network::GetRoomMember().lock()) {
Network::GameInfo game_info;
game_info.name = name;
@@ -477,12 +428,6 @@ struct System::Impl {
stop_event = {};
Network::RestartSocketOperations();
if (legacy_online) {
// Keep legacy_online running for the emulator's lifetime
// legacy_online->Stop();
// legacy_online.reset();
}
if (auto room_member = Network::GetRoomMember().lock()) {
Network::GameInfo game_info{};
room_member->SendGameInfo(game_info);
@@ -571,9 +516,6 @@ struct System::Impl {
/// Network instance
Network::NetworkInstance network_instance;
/// Legacy Online Service
std::unique_ptr<Network::LegacyOnlineService> legacy_online;
/// Debugger
std::unique_ptr<Core::Debugger> debugger;

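The call above replaces the removed System::Impl::LoadOverrides with GameSettings::LoadOverrides(program_id, gpu_core->Renderer()). A rough, hypothetical sketch of what the new game_settings module might expose (the namespace, signature and renderer type are inferred from that call site and are assumptions, not the actual contents of game_settings.cpp/h):

// game_settings.h (hypothetical sketch)
#include "common/common_types.h"
namespace VideoCore { class RendererBase; }
namespace GameSettings {
void LoadOverrides(u64 program_id, VideoCore::RendererBase& renderer);
}

// game_settings.cpp (hypothetical sketch mirroring the removed per-title logic above)
#include "common/logging/log.h"
#include "common/settings.h"
#include "video_core/renderer_base.h"
namespace GameSettings {
void LoadOverrides(u64 program_id, VideoCore::RendererBase& renderer) {
    const std::string vendor = renderer.GetDeviceVendor();
    LOG_INFO(Core, "GPU Vendor: {}", vendor);
    // Reset all per-game flags before applying overrides.
    Settings::values.use_squashed_iterated_blend = false;
    // Ninja Gaiden Ragebound
    constexpr u64 ngr = 0x0100781020710000ULL;
    if (program_id == ngr) {
        LOG_INFO(Core, "Enabling game specific override: use_squashed_iterated_blend");
        Settings::values.use_squashed_iterated_blend = true;
    }
}
} // namespace GameSettings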
View File

@@ -11,8 +11,6 @@
#include "core/core.h"
#include "core/file_sys/savedata_factory.h"
#include "core/file_sys/vfs/vfs.h"
#include "core/file_sys/sdmc_factory.h"
#include "core/hle/service/filesystem/filesystem.h"
namespace FileSys {
@@ -57,114 +55,39 @@ std::string GetFutureSaveDataPath(SaveDataSpaceId space_id, SaveDataType type, u
} // Anonymous namespace
VirtualDir SaveDataFactory::Open(SaveDataSpaceId space, const SaveDataAttribute& meta) const {
u64 target_program_id = meta.program_id;
// CRITICAL FIX: If the game requests Cache/Temp with ProgramID 0 (generic),
// we MUST redirect it to the actual running TitleID, otherwise it looks in '.../0000000000000000'.
if ((meta.type == SaveDataType::Cache || meta.type == SaveDataType::Temporary) && target_program_id == 0) {
target_program_id = system.GetApplicationProcessProgramID();
LOG_INFO(Service_FS, "Redirecting generic Cache request (ID 0) to active TitleID: {:016X}", target_program_id);
}
SaveDataFactory::SaveDataFactory(Core::System& system_, ProgramId program_id_,
VirtualDir save_directory_)
: system{system_}, program_id{program_id_}, dir{std::move(save_directory_)} {
// Delete all temporary storages
// On hardware, it is expected that temporary storage be empty at first use.
dir->DeleteSubdirectoryRecursive("temp");
}
const auto save_directory = GetFullPath(program_id, dir, space, meta.type, target_program_id,
SaveDataFactory::~SaveDataFactory() = default;
VirtualDir SaveDataFactory::Create(SaveDataSpaceId space, const SaveDataAttribute& meta) const {
const auto save_directory = GetFullPath(program_id, dir, space, meta.type, meta.program_id,
meta.user_id, meta.system_save_data_id);
return dir->CreateDirectoryRelative(save_directory);
}
VirtualDir SaveDataFactory::Open(SaveDataSpaceId space, const SaveDataAttribute& meta) const {
const auto save_directory = GetFullPath(program_id, dir, space, meta.type, meta.program_id,
meta.user_id, meta.system_save_data_id);
auto out = dir->GetDirectoryRelative(save_directory);
if (out == nullptr) {
LOG_WARNING(Service_FS, "Cache/Save path NOT FOUND: '{}'. Auto-create={}", save_directory, auto_create);
} else {
LOG_INFO(Service_FS, "Cache/Save path FOUND: '{}'", save_directory);
}
if (out == nullptr && (ShouldSaveDataBeAutomaticallyCreated(space, meta) && auto_create)) {
LOG_INFO(Service_FS, "Auto-creating save directory...");
return Create(space, meta);
}
if (out != nullptr) {
// Some emulators (Ryujinx) or even different firmware versions may rely on the commit
// directories /0 or /1 being present for cache or save data.
// We prioritize /1, as it usually implies a newer commit when both exist,
// but /0 is what's commonly used by Ryujinx for cache.
// Ryujinx behavior: If 0 exists and 1 does not, copy 0 to 1.
auto dir_0 = out->GetSubdirectory("0");
auto dir_1 = out->GetSubdirectory("1");
if (dir_0) LOG_INFO(Service_FS, "Found subdirectory '0' in save path.");
if (dir_1) LOG_INFO(Service_FS, "Found subdirectory '1' in save path.");
if (dir_0 != nullptr && dir_1 == nullptr) {
// User requested removal of auto-copy 0->1 logic.
// We simply don't create/copy '1' if it's missing. We rely on fallback to '0'.
LOG_INFO(Service_FS, "Ryujinx structure detected: '0' exists, '1' missing. Skipping copy (User Request).");
// VfsRawCopyD(dir_0, dir_1); // REMOVED
}
// Check for 'Addressables' and 'Addressables2' and delete 'json.cache' if present.
// This is a specific workaround for games (e.g. Just Dance) that freeze if they find old cache metadata.
// We force them to regenerate it.
const auto CleanCache = [](VirtualDir root) {
if (root == nullptr) return;
const char* subdirs[] = {"Addressables", "Addressables2"};
for (const char* subdir_name : subdirs) {
auto subdir = root->GetSubdirectory(subdir_name);
if (subdir != nullptr) {
if (subdir->DeleteFile("json.cache")) {
LOG_INFO(Service_FS, "Deleted stale 'json.cache' in '{}'", subdir_name);
}
}
}
};
VirtualDir commit_root = nullptr;
if (dir_1 != nullptr) {
LOG_INFO(Service_FS, "Using subdirectory '1' as Commit Root.");
commit_root = dir_1;
} else if (dir_0 != nullptr) {
LOG_INFO(Service_FS, "Using subdirectory '0' as Commit Root.");
commit_root = dir_0;
} else {
LOG_INFO(Service_FS, "No '0' or '1' subdirectories found. Using parent folder as Commit Root.");
commit_root = out;
}
CleanCache(commit_root);
// Implement SD_Cache.XXXX logic for Cache Storage
if (meta.type == SaveDataType::Cache) {
const std::string sd_cache_name = fmt::format("SD_Cache.{:04}", meta.index);
auto sd_cache_dir = commit_root->GetSubdirectory(sd_cache_name);
if (sd_cache_dir != nullptr) {
LOG_INFO(Service_FS, "Found SD_Cache directory: '{}'", sd_cache_name);
return sd_cache_dir;
} else if (auto_create) {
LOG_INFO(Service_FS, "Auto-creating SD_Cache directory: '{}'", sd_cache_name);
return commit_root->CreateSubdirectory(sd_cache_name);
} else {
LOG_WARNING(Service_FS, "SD_Cache directory '{}' not found in commit root.", sd_cache_name);
// SD_Cache.XXXX identifies which CacheStorage is requested, so returning commit_root here
// would conflate every storage into CacheStorage 0 (conceptually, "everything").
// Returning commit_root might be safer for legacy saves where the SD_Cache layout is not
// enforced, but for now return nullptr when the directory is missing and cannot be created,
// so the caller hits the creation/error path instead.
return nullptr;
}
}
return commit_root;
}
return out;
}
VirtualDir SaveDataFactory::GetSaveDataSpaceDirectory(SaveDataSpaceId space) const {
const auto path = GetSaveDataSpaceIdPath(space);
// Ensure the directory exists, otherwise FindAllSaves fails.
return GetOrCreateDirectoryRelative(dir, path);
// return dir->GetDirectoryRelative(GetSaveDataSpaceIdPath(space));
return dir->GetDirectoryRelative(GetSaveDataSpaceIdPath(space));
}
std::string SaveDataFactory::GetSaveDataSpaceIdPath(SaveDataSpaceId space) {
@@ -173,12 +96,12 @@ std::string SaveDataFactory::GetSaveDataSpaceIdPath(SaveDataSpaceId space) {
return "/system/";
case SaveDataSpaceId::User:
case SaveDataSpaceId::SdUser:
case SaveDataSpaceId::Temporary: // Map into User so we can find the save/ folder
return "/user/";
case SaveDataSpaceId::Temporary:
return "/temp/";
default:
// ASSERT_MSG(false, "Unrecognized SaveDataSpaceId: {:02X}", static_cast<u8>(space));
LOG_WARNING(Service_FS, "Unrecognized SaveDataSpaceId: {:02X}, defaulting to /user/", static_cast<u8>(space));
return "/user/";
ASSERT_MSG(false, "Unrecognized SaveDataSpaceId: {:02X}", static_cast<u8>(space));
return "/unrecognized/"; ///< To prevent corruption when ignoring asserts.
}
}
@@ -214,9 +137,8 @@ std::string SaveDataFactory::GetFullPath(ProgramId program_id, VirtualDir dir,
return fmt::format("{}save/{:016X}/{:016X}{:016X}/{:016X}", out, 0, user_id[1], user_id[0],
title_id);
case SaveDataType::Temporary:
// Unified Cache/Temporary Path: Always use save/cache/{TitleID}
// This simplifies user instructions and avoids UUID/Permission issues.
return fmt::format("{}save/cache/{:016X}", out, title_id);
return fmt::format("{}{:016X}/{:016X}{:016X}/{:016X}", out, 0, user_id[1], user_id[0],
title_id);
case SaveDataType::Cache:
return fmt::format("{}save/cache/{:016X}", out, title_id);
default:
@@ -225,40 +147,6 @@ std::string SaveDataFactory::GetFullPath(ProgramId program_id, VirtualDir dir,
}
}
SaveDataFactory::SaveDataFactory(Core::System& system_, ProgramId program_id_,
VirtualDir save_directory_)
: system{system_}, program_id{program_id_}, dir{std::move(save_directory_)} {
// Delete all temporary storages
// On hardware, it is expected that temporary storage be empty at first use.
dir->DeleteSubdirectoryRecursive("temp");
}
SaveDataFactory::~SaveDataFactory() = default;
VirtualDir SaveDataFactory::Create(SaveDataSpaceId space, const SaveDataAttribute& meta) const {
const auto save_directory = GetFullPath(program_id, dir, space, meta.type, meta.program_id,
meta.user_id, meta.system_save_data_id);
auto created_dir = dir->CreateDirectoryRelative(save_directory);
// For Cache storage, enforce the new hierarchy: .../1/SD_Cache.XXXX
if (meta.type == SaveDataType::Cache && created_dir != nullptr) {
// Ensure commit directory '1' exists
auto commit_dir = created_dir->GetSubdirectory("1");
if (commit_dir == nullptr) {
commit_dir = created_dir->CreateSubdirectory("1");
}
if (commit_dir != nullptr) {
// Create SD_Cache.XXXX
const std::string sd_cache_name = fmt::format("SD_Cache.{:04}", meta.index);
return commit_dir->CreateSubdirectory(sd_cache_name);
}
}
return created_dir;
}
std::string SaveDataFactory::GetUserGameSaveDataRoot(u128 user_id, bool future) {
if (future) {
Common::UUID uuid;

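Putting the path pieces from this file together, the unified cache layout resolves to <space>/save/cache/<TitleID>/<commit dir>/SD_Cache.<index>. A small standalone example of the resulting path (the title ID and index are only illustrative):

#include <cstdint>
#include <cstdio>
#include <string>
#include <fmt/format.h>

int main() {
    const std::string space_path = "/user/";            // User, SdUser and (now) Temporary all map here
    const std::uint64_t title_id = 0x0100781020710000;  // illustrative title ID
    const unsigned index = 0;                            // CacheStorage index
    const std::string base = fmt::format("{}save/cache/{:016X}", space_path, title_id);
    const std::string full = fmt::format("{}/1/SD_Cache.{:04}", base, index);
    std::puts(full.c_str());  // /user/save/cache/0100781020710000/1/SD_Cache.0000
}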
View File

@@ -686,16 +686,6 @@ void FileSystemController::CreateFactories(FileSys::VfsFilesystem& vfs, bool ove
using EdenPath = Common::FS::EdenPath;
const auto sdmc_dir_path = Common::FS::GetEdenPath(EdenPath::SDMCDir);
const auto sdmc_load_dir_path = sdmc_dir_path / "atmosphere/contents";
// If the NAND user save location doesn't exist but the SDMC contains
// Nintendo/save (common portable save structure), create a host-side
// symlink so the emulator will see those saves under the expected NAND path.
// This helps users who placed saves under `<eden>/user/sdmc/Nintendo/save/...`.
// SDMC to NAND sync logic REMOVED as per user request.
// The emulator will no longer attempt to symlink or copy "Nintendo/save" or "SD_Cache.0000"
// from SDMC to the NAND user save directory.
// Users must ensure their save/cache structure is valid within the NAND directory itself
// if that is what they intend to use, or rely on the game creating it.
const auto rw_mode = FileSys::OpenMode::ReadWrite;
auto nand_directory =

View File

@@ -14,7 +14,7 @@ namespace Service::FileSystem {
ISaveDataInfoReader::ISaveDataInfoReader(Core::System& system_,
std::shared_ptr<SaveDataController> save_data_controller_,
FileSys::SaveDataSpaceId space, bool cache_only)
FileSys::SaveDataSpaceId space)
: ServiceFramework{system_, "ISaveDataInfoReader"}, save_data_controller{
save_data_controller_} {
static const FunctionInfo functions[] = {
@@ -22,7 +22,7 @@ ISaveDataInfoReader::ISaveDataInfoReader(Core::System& system_,
};
RegisterHandlers(functions);
FindAllSaves(space, cache_only);
FindAllSaves(space);
}
ISaveDataInfoReader::~ISaveDataInfoReader() = default;
@@ -63,7 +63,7 @@ Result ISaveDataInfoReader::ReadSaveDataInfo(
R_SUCCEED();
}
void ISaveDataInfoReader::FindAllSaves(FileSys::SaveDataSpaceId space, bool cache_only) {
void ISaveDataInfoReader::FindAllSaves(FileSys::SaveDataSpaceId space) {
FileSys::VirtualDir save_root{};
const auto result = save_data_controller->OpenSaveDataSpace(&save_root, space);
@@ -74,12 +74,8 @@ void ISaveDataInfoReader::FindAllSaves(FileSys::SaveDataSpaceId space, bool cach
for (const auto& type : save_root->GetSubdirectories()) {
if (type->GetName() == "save") {
if (cache_only) {
FindCacheSaves(space, type);
} else {
FindNormalSaves(space, type);
}
} else if (space == FileSys::SaveDataSpaceId::Temporary && !cache_only) {
FindNormalSaves(space, type);
} else if (space == FileSys::SaveDataSpaceId::Temporary) {
FindTemporaryStorageSaves(space, type);
}
}
@@ -88,11 +84,6 @@ void ISaveDataInfoReader::FindAllSaves(FileSys::SaveDataSpaceId space, bool cach
void ISaveDataInfoReader::FindNormalSaves(FileSys::SaveDataSpaceId space,
const FileSys::VirtualDir& type) {
for (const auto& save_id : type->GetSubdirectories()) {
// Skip cache directory in normal scans
if (save_id->GetName() == "cache") {
continue;
}
for (const auto& user_id : save_id->GetSubdirectories()) {
// Skip non user id subdirectories
if (user_id->GetName().size() != 0x20) {
@@ -141,96 +132,6 @@ void ISaveDataInfoReader::FindNormalSaves(FileSys::SaveDataSpaceId space,
}
}
void ISaveDataInfoReader::FindCacheSaves(FileSys::SaveDataSpaceId space,
const FileSys::VirtualDir& type) {
const auto cache_dir = type->GetSubdirectory("cache");
if (cache_dir == nullptr) {
return;
}
for (const auto& title_id_dir : cache_dir->GetSubdirectories()) {
const auto title_id = stoull_be(title_id_dir->GetName());
// Simple validation: TitleID should be non-zero
if (title_id == 0) {
continue;
}
// Determine commit root (priority to "1", then "0", then self)
auto commit_root = title_id_dir->GetSubdirectory("1");
if (commit_root == nullptr) {
commit_root = title_id_dir->GetSubdirectory("0");
}
// If neither exists, we might fall back to title_id_dir itself if users put SD_Cache directly there,
// but based on SaveDataFactory we expect it inside 0 or 1.
if (commit_root == nullptr) {
// Check if SD_Cache exists directly in title_dir (legacy/fallback)
bool has_sd_cache_root = false;
for (const auto& sub : title_id_dir->GetSubdirectories()) {
if (sub->GetName().find("SD_Cache.") == 0) {
has_sd_cache_root = true;
break;
}
}
if (has_sd_cache_root) {
commit_root = title_id_dir;
} else {
continue; // No valid storage found
}
}
bool found_any = false;
for (const auto& sd_cache_dir : commit_root->GetSubdirectories()) {
const std::string& name = sd_cache_dir->GetName();
if (name.find("SD_Cache.") == 0) {
// Parse index from "SD_Cache.XXXX"
u64 index = 0;
try {
if (name.size() > 9) {
index = std::stoull(name.substr(9));
}
} catch(...) {
continue;
}
info.emplace_back(SaveDataInfo{
0,
space,
FileSys::SaveDataType::Cache,
{}, // padding 0x6
{}, // user_id (empty array match)
0, // save_id
title_id,
sd_cache_dir->GetSize(),
static_cast<u16>(index), // Correct index with cast
FileSys::SaveDataRank::Primary,
{}, // padding 0x25
});
found_any = true;
}
}
// Fallback for legacy "flat" cache layouts with no SD_Cache folders: existing cache data
// without the folder structure is treated as index 0.
if (!found_any) {
// Treat the entire commit_root as index 0 (Legacy behavior)
info.emplace_back(SaveDataInfo{
0,
space,
FileSys::SaveDataType::Cache,
{}, // padding 0x6
{}, // user_id (empty array match)
0, // save_id
title_id,
commit_root->GetSize(),
0, // index 0
FileSys::SaveDataRank::Primary,
{}, // padding 0x25
});
}
}
}
void ISaveDataInfoReader::FindTemporaryStorageSaves(FileSys::SaveDataSpaceId space,
const FileSys::VirtualDir& type) {
for (const auto& user_id : type->GetSubdirectories()) {

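The removed FindCacheSaves above derives the CacheStorage index from directory names of the form SD_Cache.XXXX. A standalone sketch of that parsing step (ParseSdCacheIndex is a made-up helper name, not part of this diff):

#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>

// Parse the CacheStorage index from a name such as "SD_Cache.0003".
// Returns nullopt for names that do not match the expected pattern.
std::optional<std::uint64_t> ParseSdCacheIndex(const std::string& name) {
    constexpr const char prefix[] = "SD_Cache.";
    constexpr std::size_t prefix_len = sizeof(prefix) - 1; // 9
    if (name.rfind(prefix, 0) != 0 || name.size() <= prefix_len) {
        return std::nullopt;
    }
    try {
        return std::stoull(name.substr(prefix_len));
    } catch (...) {
        return std::nullopt;
    }
}
// Example: ParseSdCacheIndex("SD_Cache.0003") yields 3; ParseSdCacheIndex("save") yields nullopt.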
View File

@@ -16,7 +16,7 @@ class ISaveDataInfoReader final : public ServiceFramework<ISaveDataInfoReader> {
public:
explicit ISaveDataInfoReader(Core::System& system_,
std::shared_ptr<SaveDataController> save_data_controller_,
FileSys::SaveDataSpaceId space, bool cache_only = false);
FileSys::SaveDataSpaceId space);
~ISaveDataInfoReader() override;
struct SaveDataInfo {
@@ -38,9 +38,8 @@ public:
OutArray<SaveDataInfo, BufferAttr_HipcMapAlias> out_entries);
private:
void FindAllSaves(FileSys::SaveDataSpaceId space, bool cache_only);
void FindAllSaves(FileSys::SaveDataSpaceId space);
void FindNormalSaves(FileSys::SaveDataSpaceId space, const FileSys::VirtualDir& type);
void FindCacheSaves(FileSys::SaveDataSpaceId space, const FileSys::VirtualDir& type);
void FindTemporaryStorageSaves(FileSys::SaveDataSpaceId space, const FileSys::VirtualDir& type);
std::shared_ptr<SaveDataController> save_data_controller;

View File

@@ -353,10 +353,10 @@ Result FSP_SRV::OpenSaveDataInfoReaderBySaveDataSpaceId(
Result FSP_SRV::OpenSaveDataInfoReaderOnlyCacheStorage(
OutInterface<ISaveDataInfoReader> out_interface) {
LOG_DEBUG(Service_FS, "called");
LOG_WARNING(Service_FS, "(STUBBED) called");
*out_interface = std::make_shared<ISaveDataInfoReader>(system, save_data_controller,
FileSys::SaveDataSpaceId::User, true);
FileSys::SaveDataSpaceId::Temporary);
R_SUCCEED();
}

View File

@@ -42,16 +42,8 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
const auto& message_header = context.GetDomainMessageHeader();
const auto object_id = message_header.object_id;
// Some games send magic numbers in the object_id field, which indicates
// this is not actually a proper domain request
const u32 sfci_magic = Common::MakeMagic('S', 'F', 'C', 'I');
const u32 sfco_magic = Common::MakeMagic('S', 'F', 'C', 'O');
if (object_id == sfci_magic || object_id == sfco_magic) {
// This is not a domain request, treat it as a regular session request
return session_handler != nullptr;
}
if (object_id > DomainHandlerCount()) {
LOG_CRITICAL(IPC, "object_id {} is too big!", object_id);
return false;
}
return !DomainHandler(object_id - 1).expired();
@@ -99,18 +91,7 @@ Result SessionRequestManager::HandleDomainSyncRequest(Kernel::KServerSession* se
// If there is a DomainMessageHeader, then this is CommandType "Request"
const auto& domain_message_header = context.GetDomainMessageHeader();
const u32 object_id = domain_message_header.object_id;
// Some games send magic numbers in the object_id field
const u32 sfci_magic = Common::MakeMagic('S', 'F', 'C', 'I');
const u32 sfco_magic = Common::MakeMagic('S', 'F', 'C', 'O');
if (object_id == sfci_magic || object_id == sfco_magic) {
// This is not a domain request, handle as regular session request
LOG_DEBUG(IPC, "Detected magic number 0x{:08X} in object_id, treating as regular session request. Command={}",
object_id, context.GetCommand());
return session_handler->HandleSyncRequest(*server_session, context);
}
const u32 object_id{domain_message_header.object_id};
switch (domain_message_header.command) {
case IPC::DomainMessageHeader::CommandType::SendMessage:
if (object_id > this->DomainHandlerCount()) {
@@ -219,17 +200,7 @@ void HLERequestContext::ParseCommandBuffer(u32_le* src_cmdbuf, bool incoming) {
// If this is an incoming message, only CommandType "Request" has a domain header
// All outgoing domain messages have the domain header, if only incoming has it
if (incoming || domain_message_header) {
// Check if the next value is actually a magic number (SFCI/SFCO)
// Some games send these in the object_id field, indicating this is not a domain request
const u32 possible_object_id = src_cmdbuf[rp.GetCurrentOffset() + 1]; // object_id is second field
const u32 sfci_magic = Common::MakeMagic('S', 'F', 'C', 'I');
const u32 sfco_magic = Common::MakeMagic('S', 'F', 'C', 'O');
if (possible_object_id != sfci_magic && possible_object_id != sfco_magic) {
// This is a proper domain request
domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
}
// If it's a magic number, skip reading the domain header entirely
domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
} else {
if (GetManager()->IsDomain()) {
LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
@@ -249,15 +220,9 @@ void HLERequestContext::ParseCommandBuffer(u32_le* src_cmdbuf, bool incoming) {
}
if (incoming) {
// Only check magic if we have a valid data payload header
// When domain header is skipped (SFCI in object_id), the structure is different
if (domain_message_header) {
ASSERT(data_payload_header->magic == Common::MakeMagic('S', 'F', 'C', 'I'));
}
ASSERT(data_payload_header->magic == Common::MakeMagic('S', 'F', 'C', 'I'));
} else {
if (domain_message_header) {
ASSERT(data_payload_header->magic == Common::MakeMagic('S', 'F', 'C', 'O'));
}
ASSERT(data_payload_header->magic == Common::MakeMagic('S', 'F', 'C', 'O'));
}
}

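For reference, the SFCI/SFCO magics compared against object_id above are just the four ASCII bytes packed into a u32; a self-contained check (make_magic is a local stand-in, assuming Common::MakeMagic packs bytes little-endian in the same way):

#include <cstdint>
#include <cstdio>

constexpr std::uint32_t make_magic(char a, char b, char c, char d) {
    return static_cast<std::uint32_t>(static_cast<unsigned char>(a)) |
           static_cast<std::uint32_t>(static_cast<unsigned char>(b)) << 8 |
           static_cast<std::uint32_t>(static_cast<unsigned char>(c)) << 16 |
           static_cast<std::uint32_t>(static_cast<unsigned char>(d)) << 24;
}

int main() {
    std::printf("SFCI = 0x%08X\n", make_magic('S', 'F', 'C', 'I')); // 0x49434653
    std::printf("SFCO = 0x%08X\n", make_magic('S', 'F', 'C', 'O')); // 0x4F434653
    // A well-formed domain object_id is a small handle index, so values this large
    // indicate the word is actually the start of a data payload header.
}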
View File

@@ -923,10 +923,7 @@ std::pair<s32, Errno> BSD::SendImpl(s32 fd, u32 flags, std::span<const u8> messa
if (!IsFileDescriptorValid(fd)) {
return {-1, Errno::BADF};
}
LOG_CRITICAL(Service, "SendImpl called: fd={}, size={} bytes, flags={}", fd, message.size(), flags);
auto result = Translate(file_descriptors[fd]->socket->Send(message, flags));
LOG_CRITICAL(Service, "SendImpl result: sent={} bytes, errno={}", result.first, static_cast<int>(result.second));
return result;
return Translate(file_descriptors[fd]->socket->Send(message, flags));
}
std::pair<s32, Errno> BSD::SendToImpl(s32 fd, u32 flags, std::span<const u8> message,

View File

@@ -60,29 +60,12 @@ NSD::NSD(Core::System& system_, const char* name) : ServiceFramework{system_, na
RegisterHandlers(functions);
}
static const std::vector<std::pair<std::string, std::string>> redirectionRules = {
{"public-ubiservices.com", "jdlo.ovosimpatico.com"},
{"public-ubiservices.ubi.com", "jdlo.ovosimpatico.com"},
};
static std::string GetRedirectedHost(const std::string& host) {
for (const auto& rule : redirectionRules) {
if (host.find(rule.first) != std::string::npos) {
LOG_INFO(Service, "Redirecting NSD host '{}' to '{}'", host, rule.second);
return rule.second;
}
}
return host;
}
static std::string ResolveImpl(const std::string& fqdn_in) {
// The real implementation makes various substitutions.
// For now we just return the string as-is, which is good enough when not
// connecting to real Nintendo servers.
LOG_WARNING(Service, "(STUBBED) called, fqdn_in={}", fqdn_in);
return GetRedirectedHost(fqdn_in);
return fqdn_in;
}
static Result ResolveCommon(const std::string& fqdn_in, std::array<char, 0x100>& fqdn_out) {

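The redirection applied by GetRedirectedHost above is a plain substring match over the rule table, with the first hit winning. A self-contained illustration (Redirect is a local stand-in; the rule table is copied from this diff):

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

static const std::vector<std::pair<std::string, std::string>> redirection_rules = {
    {"public-ubiservices.com", "jdlo.ovosimpatico.com"},
    {"public-ubiservices.ubi.com", "jdlo.ovosimpatico.com"},
};

static std::string Redirect(const std::string& host) {
    for (const auto& rule : redirection_rules) {
        // find() is a substring match, so any host containing the pattern is redirected.
        if (host.find(rule.first) != std::string::npos) {
            return rule.second;
        }
    }
    return host;
}

int main() {
    std::printf("%s\n", Redirect("public-ubiservices.ubi.com").c_str()); // jdlo.ovosimpatico.com
    std::printf("%s\n", Redirect("example.org").c_str());                // example.org (unchanged)
}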
View File

@@ -29,13 +29,13 @@ SFDNSRES::SFDNSRES(Core::System& system_) : ServiceFramework{system_, "sfdnsres"
{4, nullptr, "GetHostStringErrorRequest"},
{5, &SFDNSRES::GetGaiStringErrorRequest, "GetGaiStringErrorRequest"},
{6, &SFDNSRES::GetAddrInfoRequest, "GetAddrInfoRequest"},
{7, &SFDNSRES::GetNameInfoRequest, "GetNameInfoRequest"},
{8, &SFDNSRES::RequestCancelHandleRequest, "RequestCancelHandleRequest"},
{7, nullptr, "GetNameInfoRequest"},
{8, nullptr, "RequestCancelHandleRequest"},
{9, nullptr, "CancelRequest"},
{10, &SFDNSRES::GetHostByNameRequestWithOptions, "GetHostByNameRequestWithOptions"},
{11, nullptr, "GetHostByAddrRequestWithOptions"},
{12, &SFDNSRES::GetAddrInfoRequestWithOptions, "GetAddrInfoRequestWithOptions"},
{13, &SFDNSRES::GetNameInfoRequestWithOptions, "GetNameInfoRequestWithOptions"},
{13, nullptr, "GetNameInfoRequestWithOptions"},
{14, &SFDNSRES::ResolverSetOptionRequest, "ResolverSetOptionRequest"},
{15, nullptr, "ResolverGetOptionRequest"},
};
@@ -66,21 +66,6 @@ static bool IsBlockedHost(const std::string& host) {
[&host](const std::string& domain) { return host.find(domain) != std::string::npos; });
}
static const std::vector<std::pair<std::string, std::string>> redirectionRules = {
{"public-ubiservices.com", "jdlo.ovosimpatico.com"},
{"public-ubiservices.ubi.com", "jdlo.ovosimpatico.com"},
};
static std::string GetRedirectedHost(const std::string& host) {
for (const auto& rule : redirectionRules) {
if (host.find(rule.first) != std::string::npos) {
LOG_INFO(Service, "Redirecting host '{}' to '{}'", host, rule.second);
return rule.second;
}
}
return host;
}
static NetDbError GetAddrInfoErrorToNetDbError(GetAddrInfoError result) {
// These combinations have been verified on console (but are not
// exhaustive).
@@ -178,8 +163,7 @@ static std::pair<u32, GetAddrInfoError> GetHostByNameRequestImpl(HLERequestConte
parameters.use_nsd_resolve, parameters.cancel_handle, parameters.process_id);
const auto host_buffer = ctx.ReadBuffer(0);
std::string host = Common::StringFromBuffer(host_buffer);
host = GetRedirectedHost(host);
const std::string host = Common::StringFromBuffer(host_buffer);
// For now, ignore options, which are in input buffer 1 for GetHostByNameRequestWithOptions.
// Prevent resolution of Nintendo servers
@@ -297,8 +281,7 @@ static std::pair<u32, GetAddrInfoError> GetAddrInfoRequestImpl(HLERequestContext
// before looking up.
const auto host_buffer = ctx.ReadBuffer(0);
std::string host = Common::StringFromBuffer(host_buffer);
host = GetRedirectedHost(host);
const std::string host = Common::StringFromBuffer(host_buffer);
// Prevent resolution of Nintendo servers
if (IsBlockedHost(host)) {
@@ -381,120 +364,6 @@ void SFDNSRES::GetAddrInfoRequestWithOptions(HLERequestContext& ctx) {
});
}
void SFDNSRES::GetNameInfoRequest(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto addr_in = rp.PopRaw<SockAddrIn>();
const u32 flags = rp.Pop<u32>();
const u32 cancel_handle = rp.Pop<u32>();
const u64 process_id = rp.Pop<u64>();
LOG_DEBUG(Service, "called. flags={}, cancel_handle={}, process_id={}", flags, cancel_handle,
process_id);
struct OutputParameters {
u32 data_size;
GetAddrInfoError gai_error;
NetDbError netdb_error;
Errno bsd_errno;
};
static_assert(sizeof(OutputParameters) == 0x10);
const auto res = Network::GetNameInfo(Translate(addr_in));
if (res.second != 0) {
const auto network_error = Network::TranslateGetAddrInfoErrorFromNative(res.second);
const auto service_error = Translate(network_error);
IPC::ResponseBuilder rb{ctx, 6};
rb.Push(ResultSuccess);
rb.PushRaw(OutputParameters{
.data_size = 0,
.gai_error = service_error,
.netdb_error = GetAddrInfoErrorToNetDbError(service_error),
.bsd_errno = GetAddrInfoErrorToErrno(service_error),
});
return;
}
const std::string& host = res.first;
const u32 data_size = static_cast<u32>(host.size() + 1);
ctx.WriteBuffer(host.data(), data_size, 0);
IPC::ResponseBuilder rb{ctx, 6};
rb.Push(ResultSuccess);
rb.PushRaw(OutputParameters{
.data_size = data_size,
.gai_error = GetAddrInfoError::SUCCESS,
.netdb_error = NetDbError::Success,
.bsd_errno = Errno::SUCCESS,
});
}
void SFDNSRES::GetNameInfoRequestWithOptions(HLERequestContext& ctx) {
struct InputParameters {
u32 flags;
u32 interface_index;
u64 process_id;
u32 padding; // The three fields above total 0x10 bytes; the expected size is 0x14, so assume trailing padding.
};
// The exact wire layout of InputParameters is not confirmed (flags + interface_index +
// process_id is only 0x10 bytes), so the fields are popped manually below instead of
// relying on PopRaw over this struct.
IPC::RequestParser rp{ctx};
const auto addr_in = rp.PopRaw<SockAddrIn>();
const u32 flags = rp.Pop<u32>();
const u32 interface_index = rp.Pop<u32>();
const u64 process_id = rp.Pop<u64>();
(void)flags;
(void)interface_index;
(void)process_id;
// If there was padding, it might be implicitly popped or ignored.
struct OutputParameters {
u32 data_size;
GetAddrInfoError gai_error;
NetDbError netdb_error;
Errno bsd_errno;
};
static_assert(sizeof(OutputParameters) == 0x10);
const auto res = Network::GetNameInfo(Translate(addr_in));
if (res.second != 0) {
const auto network_error = Network::TranslateGetAddrInfoErrorFromNative(res.second);
const auto service_error = Translate(network_error);
IPC::ResponseBuilder rb{ctx, 6};
rb.Push(ResultSuccess);
rb.PushRaw(OutputParameters{
.data_size = 0,
.gai_error = service_error,
.netdb_error = GetAddrInfoErrorToNetDbError(service_error),
.bsd_errno = GetAddrInfoErrorToErrno(service_error),
});
return;
}
const std::string& host = res.first;
const u32 data_size = static_cast<u32>(host.size() + 1);
ctx.WriteBuffer(host.data(), data_size, 0);
IPC::ResponseBuilder rb{ctx, 6};
rb.Push(ResultSuccess);
rb.PushRaw(OutputParameters{
.data_size = data_size,
.gai_error = GetAddrInfoError::SUCCESS,
.netdb_error = NetDbError::Success,
.bsd_errno = Errno::SUCCESS,
});
}
void SFDNSRES::ResolverSetOptionRequest(HLERequestContext& ctx) {
LOG_WARNING(Service, "(STUBBED) called");
@@ -503,24 +372,4 @@ void SFDNSRES::ResolverSetOptionRequest(HLERequestContext& ctx) {
rb.Push(ResultSuccess);
rb.Push<s32>(0); // bsd errno
}
void SFDNSRES::RequestCancelHandleRequest(HLERequestContext& ctx) {
// This is just a stub for now.
// In a real implementation this would likely cancel a pending request represented by the handle.
LOG_WARNING(Service, "(STUBBED) called");
struct InputParameters {
u32 cancel_handle;
};
IPC::RequestParser rp{ctx};
auto input = rp.PopRaw<InputParameters>();
LOG_DEBUG(Service, "cancel_handle={}", input.cancel_handle);
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(ResultSuccess);
rb.Push<u32>(0); // cancel_handle; the exact response layout is unverified, so echo zero here
// and return a bsd_errno below, following the typical response pattern.
rb.Push<s32>(0); // bsd_errno
}
} // namespace Service::Sockets

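Both GetNameInfo handlers above push the same 0x10-byte OutputParameters block. A self-contained sketch of why it packs to exactly 0x10 bytes, assuming all three error enums are u32-backed (Errno is declared with ": u32" elsewhere in this diff; the other two are assumptions):

#include <cstdint>

enum class GetAddrInfoError : std::uint32_t { SUCCESS = 0 };
enum class NetDbError : std::uint32_t { Success = 0 };
enum class Errno : std::uint32_t { SUCCESS = 0 };

struct OutputParameters {
    std::uint32_t data_size;
    GetAddrInfoError gai_error;
    NetDbError netdb_error;
    Errno bsd_errno;
};
static_assert(sizeof(OutputParameters) == 0x10, "four u32-sized fields, no padding");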
View File

@@ -22,10 +22,7 @@ private:
void GetHostByNameRequestWithOptions(HLERequestContext& ctx);
void GetAddrInfoRequest(HLERequestContext& ctx);
void GetAddrInfoRequestWithOptions(HLERequestContext& ctx);
void GetNameInfoRequest(HLERequestContext& ctx);
void GetNameInfoRequestWithOptions(HLERequestContext& ctx);
void ResolverSetOptionRequest(HLERequestContext& ctx);
void RequestCancelHandleRequest(HLERequestContext& ctx);
};
} // namespace Service::Sockets

View File

@@ -16,12 +16,10 @@ enum class Errno : u32 {
SUCCESS = 0,
BADF = 9,
AGAIN = 11,
ACCES = 13,
INVAL = 22,
MFILE = 24,
PIPE = 32,
MSGSIZE = 90,
ADDRINUSE = 98,
CONNABORTED = 103,
CONNRESET = 104,
NOTCONN = 107,

View File

@@ -37,10 +37,6 @@ Errno Translate(Network::Errno value) {
return Errno::CONNRESET;
case Network::Errno::INPROGRESS:
return Errno::INPROGRESS;
case Network::Errno::ACCES:
return Errno::ACCES;
case Network::Errno::ADDRINUSE:
return Errno::ADDRINUSE;
default:
UNIMPLEMENTED_MSG("Unimplemented errno={}", value);
return Errno::SUCCESS;
@@ -265,9 +261,7 @@ PollEvents Translate(Network::PollEvents flags) {
Network::SockAddrIn Translate(SockAddrIn value) {
// Note: 6 is incorrect, but can be passed by homebrew (because libnx sets
// sin_len to 6 when deserializing getaddrinfo results).
if (value.len != 0 && value.len != sizeof(value) && value.len != 6) {
LOG_WARNING(Service, "Unexpected SockAddrIn length={}", value.len);
}
ASSERT(value.len == 0 || value.len == sizeof(value) || value.len == 6);
return {
.family = Translate(static_cast<Domain>(value.family)),

View File

@@ -117,20 +117,15 @@ public:
RegisterHandlers(functions);
shared_data->connection_count++;
LOG_CRITICAL(Service_SSL, "ISslConnection created! Total connections: {}", shared_data->connection_count);
}
~ISslConnection() {
shared_data->connection_count--;
if (fd_to_close.has_value()) {
const s32 fd = *fd_to_close;
if (do_not_close_socket) {
// do_not_close_socket was changed after we took ownership of the fd. The original
// logic implied we duplicated the socket and should close our dup, but honor the
// flag and skip the close.
LOG_INFO(Service_SSL, "do_not_close_socket is true, skipping close of fd {}", fd);
if (!do_not_close_socket) {
LOG_ERROR(Service_SSL,
"do_not_close_socket was changed after setting socket; is this right?");
} else {
auto bsd = system.ServiceManager().GetService<Service::Sockets::BSD>("bsd:u");
if (bsd) {
@@ -274,17 +269,16 @@ private:
}
Result PendingImpl(s32* out_pending) {
ASSERT_OR_EXECUTE(did_handshake, { return ResultInternalError; });
return backend->Pending(out_pending);
LOG_WARNING(Service_SSL, "(STUBBED) called.");
*out_pending = 0;
return ResultSuccess;
}
void SetSocketDescriptor(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const s32 in_fd = rp.Pop<s32>();
LOG_CRITICAL(Service_SSL, "SetSocketDescriptor called with fd={}", in_fd);
s32 out_fd{-1};
const Result res = SetSocketDescriptorImpl(&out_fd, in_fd);
LOG_CRITICAL(Service_SSL, "SetSocketDescriptor result: res={}, out_fd={}", res.raw, out_fd);
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(res);
rb.Push<s32>(out_fd);
@@ -314,9 +308,7 @@ private:
}
void DoHandshake(HLERequestContext& ctx) {
LOG_INFO(Service_SSL, "DoHandshake called, socket={}", socket != nullptr);
const Result res = DoHandshakeImpl();
LOG_INFO(Service_SSL, "DoHandshake result: {}, did_handshake={}", res.raw, did_handshake);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(res);
}

View File

@@ -38,7 +38,6 @@ public:
virtual Result Read(size_t* out_size, std::span<u8> data) = 0;
virtual Result Write(size_t* out_size, std::span<const u8> data) = 0;
virtual Result GetServerCerts(std::vector<std::vector<u8>>* out_certs) = 0;
virtual Result Pending(s32* out_pending) = 0;
};
Result CreateSSLConnectionBackend(std::unique_ptr<SSLConnectionBackend>* out_backend);

View File

@@ -115,22 +115,6 @@ inline void LoadCaCertStore(SSL_CTX* ctx, const char* ca_cert, std::size_t size)
}
#endif
static const std::vector<std::pair<std::string, std::string>> redirectionRules = {
{"public-ubiservices.com", "jdlo.ovosimpatico.com"},
{"public-ubiservices.ubi.com", "jdlo.ovosimpatico.com"},
};
static std::string GetRedirectedHost(const std::string& host) {
for (const auto& rule : redirectionRules) {
if (host.find(rule.first) != std::string::npos) {
LOG_INFO(Service_SSL, "Redirecting SSL host '{}' to '{}'", host, rule.second);
return rule.second;
}
}
return host;
}
} // namespace
class SSLConnectionBackendOpenSSL final : public SSLConnectionBackend {
@@ -173,8 +157,7 @@ public:
socket = std::move(socket_in);
}
Result SetHostName(const std::string& hostname_in) override {
const std::string hostname = GetRedirectedHost(hostname_in);
Result SetHostName(const std::string& hostname) override {
if (!SSL_set1_host(ssl, hostname.c_str())) { // hostname for verification
LOG_ERROR(Service_SSL, "SSL_set1_host({}) failed", hostname);
return CheckOpenSSLErrors();
@@ -264,28 +247,6 @@ public:
return ResultSuccess;
}
Result Pending(s32* out_pending) override {
if (!ssl) {
return ResultInternalError;
}
int pending = SSL_pending(ssl);
if (pending > 0) {
*out_pending = pending;
return ResultSuccess;
}
Network::PollFD poll_fd{socket.get(), Network::PollEvents::In, Network::PollEvents::In};
std::vector<Network::PollFD> poll_fds{poll_fd};
auto [count, err] = Network::Poll(poll_fds, 0);
if (count > 0 && (poll_fds[0].revents & Network::PollEvents::In) != Network::PollEvents{}) {
*out_pending = 1;
} else {
*out_pending = 0;
}
return ResultSuccess;
}
~SSLConnectionBackendOpenSSL() {
// this is null-tolerant:
SSL_free(ssl);

View File

@@ -489,27 +489,6 @@ public:
return ResultSuccess;
}
Result Pending(s32* out_pending) override {
*out_pending = static_cast<s32>(cleartext_read_buf.size());
if (*out_pending > 0) {
return ResultSuccess;
}
if (!ciphertext_read_buf.empty()) {
*out_pending = 1;
return ResultSuccess;
}
Network::PollFD poll_fd{socket.get(), Network::PollEvents::In, Network::PollEvents::In};
std::vector<Network::PollFD> poll_fds{poll_fd};
auto [count, err] = Network::Poll(poll_fds, 0);
if (count > 0 && (poll_fds[0].revents & Network::PollEvents::In) != Network::PollEvents{}) {
*out_pending = 1;
}
return ResultSuccess;
}
~SSLConnectionBackendSchannel() {
if (handshake_state != HandshakeState::Initial) {
DeleteSecurityContext(&ctxt);

View File

@@ -149,26 +149,6 @@ public:
return ResultSuccess;
}
Result Pending(s32* out_pending) override {
size_t bufferSize = 0;
OSStatus status = SSLGetBufferedReadSize(context, &bufferSize);
if (status == 0 && bufferSize > 0) {
*out_pending = static_cast<s32>(bufferSize);
return ResultSuccess;
}
Network::PollFD poll_fd{socket.get(), Network::PollEvents::In, Network::PollEvents::In};
std::vector<Network::PollFD> poll_fds{poll_fd};
auto [count, err] = Network::Poll(poll_fds, 0);
if (count > 0 && (poll_fds[0].revents & Network::PollEvents::In) != Network::PollEvents{}) {
*out_pending = 1;
} else {
*out_pending = 0;
}
return ResultSuccess;
}
static OSStatus ReadCallback(SSLConnectionRef connection, void* data, size_t* dataLength) {
return ReadOrWriteCallback(connection, data, dataLength, true);
}

View File

@@ -7,7 +7,7 @@
#include "core/internal_network/network_interface.h"
#ifdef _WIN32
#define NOMINMAX
#include <windows.h>
#include <wlanapi.h>
#ifdef _MSC_VER

View File

@@ -1,180 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
#include "core/internal_network/legacy_online.h"
#include <cstring>
#include <iostream>
#ifdef _WIN32
#include <winsock2.h>
#else
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#endif
#include "common/logging/log.h"
namespace Network {
LegacyOnlineService::LegacyOnlineService() {
#ifdef _WIN32
WSADATA wsa_data;
if (WSAStartup(MAKEWORD(2, 2), &wsa_data) != 0) {
LOG_ERROR(Network, "WSAStartup failed with error: {}", WSAGetLastError());
winsock_initialized = false;
} else {
winsock_initialized = true;
}
#endif
}
LegacyOnlineService::~LegacyOnlineService() {
Stop();
#ifdef _WIN32
if (winsock_initialized) {
WSACleanup();
}
#endif
}
void LegacyOnlineService::Start() {
if (is_running) {
return;
}
#ifdef _WIN32
if (!winsock_initialized) {
LOG_ERROR(Network, "Cannot start Legacy Online Service: Winsock not initialized");
return;
}
#endif
is_running = true;
worker_thread = std::thread(&LegacyOnlineService::ServerLoop, this);
}
void LegacyOnlineService::Stop() {
if (!is_running) {
return;
}
is_running = false;
// Close socket to wake up the thread if it's blocked on recvfrom
if (socket_fd != ~0ULL) {
#ifdef _WIN32
closesocket(static_cast<SOCKET>(socket_fd));
#else
close(static_cast<int>(socket_fd));
#endif
socket_fd = ~0ULL;
}
if (worker_thread.joinable()) {
worker_thread.join();
}
}
void LegacyOnlineService::ServerLoop() {
LOG_INFO(Network, "Starting Legacy Online UDP Server on port {}", PORT);
// WSAStartup is now handled in the constructor
auto s = socket(AF_INET, SOCK_DGRAM, 0);
#ifdef _WIN32
if (s == INVALID_SOCKET) {
#else
if (s == -1) {
#endif
LOG_ERROR(Network, "Failed to create socket");
is_running = false;
return;
}
socket_fd = static_cast<uintptr_t>(s);
int opt = 1;
#ifdef _WIN32
setsockopt(static_cast<SOCKET>(socket_fd), SOL_SOCKET, SO_REUSEADDR, (const char*)&opt, sizeof(opt));
#else
setsockopt(static_cast<int>(socket_fd), SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
#endif
sockaddr_in server_addr{};
server_addr.sin_family = AF_INET;
server_addr.sin_addr.s_addr = INADDR_ANY;
server_addr.sin_port = htons(PORT);
int res = -1;
#ifdef _WIN32
res = bind(static_cast<SOCKET>(socket_fd), (sockaddr*)&server_addr, sizeof(server_addr));
#else
res = bind(static_cast<int>(socket_fd), (sockaddr*)&server_addr, sizeof(server_addr));
#endif
if (res < 0) {
#ifdef _WIN32
LOG_ERROR(Network, "Failed to bind to port {}: {}", PORT, WSAGetLastError());
closesocket(static_cast<SOCKET>(socket_fd));
#else
LOG_ERROR(Network, "Failed to bind to port {}: {}", PORT, strerror(errno));
close(static_cast<int>(socket_fd));
#endif
socket_fd = ~0ULL;
is_running = false;
return;
}
LOG_INFO(Network, "Legacy Online Server waiting for messages...");
// A recvfrom timeout could be set so is_running is checked periodically;
// alternatively, closing the socket (as done in Stop) causes recvfrom to return an error.
char buffer[2048];
while (is_running) {
sockaddr_in client_addr{};
#ifdef _WIN32
int client_len = sizeof(client_addr);
#else
socklen_t client_len = sizeof(client_addr);
#endif
int len = -1;
#ifdef _WIN32
len = recvfrom(static_cast<SOCKET>(socket_fd), buffer, sizeof(buffer), 0, (sockaddr*)&client_addr, &client_len);
#else
len = recvfrom(static_cast<int>(socket_fd), buffer, sizeof(buffer), 0, (sockaddr*)&client_addr, &client_len);
#endif
if (!is_running) break;
if (len > 0) {
// Send ACK
const char* ack_msg = "ACK";
#ifdef _WIN32
sendto(static_cast<SOCKET>(socket_fd), ack_msg, static_cast<int>(strlen(ack_msg)), 0, (sockaddr*)&client_addr, client_len);
#else
sendto(static_cast<int>(socket_fd), ack_msg, strlen(ack_msg), 0, (sockaddr*)&client_addr, client_len);
#endif
} else {
// Error or closed
// If we closed the socket in Stop(), this will likely trigger.
break;
}
}
#ifdef _WIN32
if (socket_fd != ~0ULL) closesocket(static_cast<SOCKET>(socket_fd));
// WSACleanup is now handled in the destructor
#else
if (socket_fd != ~0ULL) close(static_cast<int>(socket_fd));
#endif
socket_fd = ~0ULL;
LOG_INFO(Network, "Legacy Online Server stopped");
}
} // namespace Network

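To exercise the ACK loop above from the host side, a minimal POSIX-only UDP client could look like the sketch below (the server handles both Winsock and POSIX; this example does not, and the port mirrors LegacyOnlineService::PORT):

#include <arpa/inet.h>
#include <cstdio>
#include <cstring>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

int main() {
    const int fd = socket(AF_INET, SOCK_DGRAM, 0);
    if (fd < 0) {
        std::perror("socket");
        return 1;
    }
    sockaddr_in addr{};
    addr.sin_family = AF_INET;
    addr.sin_port = htons(6000); // LegacyOnlineService::PORT
    inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
    const char msg[] = "ping";
    sendto(fd, msg, sizeof(msg), 0, reinterpret_cast<sockaddr*>(&addr), sizeof(addr));
    char buf[16]{};
    const ssize_t len = recvfrom(fd, buf, sizeof(buf) - 1, 0, nullptr, nullptr);
    if (len > 0) {
        std::printf("server replied: %s\n", buf); // expected: "ACK"
    }
    close(fd);
    return 0;
}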
View File

@@ -1,31 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
#include <atomic>
#include <cstdint>
#include <memory>
#include <thread>
namespace Network {
class LegacyOnlineService {
public:
LegacyOnlineService();
~LegacyOnlineService();
void Start();
void Stop();
private:
void ServerLoop();
std::atomic_bool is_running{false};
std::thread worker_thread;
uintptr_t socket_fd{~0ULL}; // ~0ULL is the unsigned equivalent of -1 (invalid socket)
bool winsock_initialized{false};
static constexpr int PORT = 6000;
};
} // namespace Network

View File

@@ -75,8 +75,6 @@ SOCKET GetInterruptSocket() {
return interrupt_socket;
}
} // namespace
sockaddr TranslateFromSockAddrIn(SockAddrIn input) {
sockaddr_in result;
@@ -85,7 +83,6 @@ sockaddr TranslateFromSockAddrIn(SockAddrIn input) {
#endif
switch (static_cast<Domain>(input.family)) {
case Domain::Unspecified:
case Domain::INET:
result.sin_family = AF_INET;
break;
@@ -108,8 +105,6 @@ sockaddr TranslateFromSockAddrIn(SockAddrIn input) {
return addr;
}
namespace {
LINGER MakeLinger(bool enable, u32 linger_value) {
ASSERT(linger_value <= (std::numeric_limits<u_short>::max)());
@@ -129,7 +124,6 @@ Errno TranslateNativeError(int e, CallType call_type = CallType::Other) {
case 0:
return Errno::SUCCESS;
case WSAEBADF:
case WSAENOTSOCK:
return Errno::BADF;
case WSAEINVAL:
return Errno::INVAL;
@@ -163,10 +157,6 @@ Errno TranslateNativeError(int e, CallType call_type = CallType::Other) {
return Errno::TIMEDOUT;
case WSAEINPROGRESS:
return Errno::INPROGRESS;
case WSAEACCES:
return Errno::ACCES;
case WSAEADDRINUSE:
return Errno::ADDRINUSE;
default:
UNIMPLEMENTED_MSG("Unimplemented errno={}", e);
return Errno::OTHER;
@@ -222,8 +212,6 @@ SOCKET GetInterruptSocket() {
return interrupt_pipe_fd[0];
}
} // namespace
sockaddr TranslateFromSockAddrIn(SockAddrIn input) {
sockaddr_in result;
@@ -246,8 +234,6 @@ sockaddr TranslateFromSockAddrIn(SockAddrIn input) {
return addr;
}
namespace {
int WSAPoll(WSAPOLLFD* fds, ULONG nfds, int timeout) {
return poll(fds, static_cast<nfds_t>(nfds), timeout);
}
@@ -334,8 +320,6 @@ Errno GetAndLogLastError(CallType call_type = CallType::Other) {
return err;
}
} // namespace
GetAddrInfoError TranslateGetAddrInfoErrorFromNative(int gai_err) {
switch (gai_err) {
case 0:
@@ -389,8 +373,6 @@ GetAddrInfoError TranslateGetAddrInfoErrorFromNative(int gai_err) {
}
}
namespace {
Domain TranslateDomainFromNative(int domain) {
switch (domain) {
case 0:
@@ -625,8 +607,6 @@ Common::Expected<std::vector<AddrInfo>, GetAddrInfoError> GetAddressInfo(
return ret;
}
std::pair<s32, Errno> Poll(std::vector<PollFD>& pollfds, s32 timeout) {
const size_t num = pollfds.size();
@@ -891,13 +871,8 @@ std::pair<s32, Errno> Socket::SendTo(u32 flags, std::span<const u8> message,
}
Errno Socket::Close() {
if (fd == INVALID_SOCKET) {
return Errno::SUCCESS;
}
[[maybe_unused]] const int result = closesocket(fd);
if (result != 0) {
GetAndLogLastError();
}
ASSERT(result == 0);
fd = INVALID_SOCKET;
return Errno::SUCCESS;

View File

@@ -48,8 +48,6 @@ enum class Errno {
TIMEDOUT,
MSGSIZE,
INPROGRESS,
ACCES,
ADDRINUSE,
OTHER,
};
@@ -124,33 +122,8 @@ std::optional<IPv4Address> GetHostIPv4Address();
std::string IPv4AddressToString(IPv4Address ip_addr);
u32 IPv4AddressToInteger(IPv4Address ip_addr);
#ifdef _WIN32
#include <ws2tcpip.h>
#else
#include <netdb.h>
#endif
// named to avoid name collision with Windows macro
Common::Expected<std::vector<AddrInfo>, GetAddrInfoError> GetAddressInfo(
const std::string& host, const std::optional<std::string>& service);
sockaddr TranslateFromSockAddrIn(SockAddrIn input);
inline std::pair<std::string, int> GetNameInfo(const SockAddrIn& addr) {
sockaddr sa = TranslateFromSockAddrIn(addr);
sockaddr_in addr_in{};
std::memcpy(&addr_in, &sa, sizeof(sockaddr_in));
char host[1025]; // NI_MAXHOST
int err = getnameinfo(reinterpret_cast<const sockaddr*>(&addr_in), sizeof(addr_in), host, sizeof(host), nullptr, 0, 0);
if (err != 0) {
return {std::string{}, err};
}
return {std::string(host), 0};
}
GetAddrInfoError TranslateGetAddrInfoErrorFromNative(int err);
} // namespace Network

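A short usage sketch for the GetNameInfo helper declared above (ReverseLookupOrEmpty is a made-up wrapper; the includes assume the project's usual logging header):

#include <string>
#include "common/logging/log.h"
#include "core/internal_network/network.h"

// Reverse-resolve an address, falling back to an empty string on failure.
std::string ReverseLookupOrEmpty(const Network::SockAddrIn& addr) {
    const auto [host, err] = Network::GetNameInfo(addr);
    if (err != 0) {
        LOG_WARNING(Network, "getnameinfo failed with error {}", err);
        return {};
    }
    return host;
}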
View File

@@ -446,12 +446,10 @@ void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst) {
void EmitSR_WScaleFactorXY(EmitContext& ctx, IR::Inst& inst) {
LOG_WARNING(Shader, "(STUBBED) called");
ctx.AddU32("{}=0x3c003c00u;", inst);
}
void EmitSR_WScaleFactorZ(EmitContext& ctx, IR::Inst& inst) {
LOG_WARNING(Shader, "(STUBBED) called");
ctx.AddU32("{}=0x3f800000u;", inst);
}
void EmitYDirection(EmitContext& ctx, IR::Inst& inst) {

View File

@@ -58,9 +58,6 @@ std::string FormatFloat(std::string_view value, IR::Type type) {
if (value == "nan") {
return "utof(0x7fc00000)";
}
if (value == "-nan") {
return "utof(0xffc00000)";
}
if (value == "inf") {
return "utof(0x7f800000)";
}

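The new "-nan" branch above maps to utof(0xffc00000), which is the canonical quiet NaN pattern (0x7fc00000) with the sign bit set. A quick standalone check (requires C++20 for std::bit_cast):

#include <bit>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    const float pos = std::bit_cast<float>(std::uint32_t{0x7fc00000});
    const float neg = std::bit_cast<float>(std::uint32_t{0xffc00000});
    // Both are NaN; only the second carries the sign bit.
    std::printf("%d %d %d\n", std::isnan(pos), std::isnan(neg), std::signbit(neg)); // 1 1 1
}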
View File

@@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -542,8 +545,9 @@ Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id c
lod = Id{};
}
const ImageOperands operands(lod, ms);
return Emit(&EmitContext::OpImageSparseFetch, &EmitContext::OpImageFetch, ctx, inst, ctx.F32[4],
TextureImage(ctx, info, index), coords, operands.MaskOptional(), operands.Span());
return Emit(&EmitContext::OpImageSparseFetch, &EmitContext::OpImageFetch, ctx, inst,
ctx.F32[4], TextureImage(ctx, info, index), coords, operands.MaskOptional(),
operands.Span());
}
Id EmitImageQueryDimensions(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id lod,

View File

@@ -315,6 +315,9 @@ void DefineSsbos(EmitContext& ctx, StorageTypeDefinition& type_def,
ctx.Decorate(id, spv::Decoration::Binding, binding);
ctx.Decorate(id, spv::Decoration::DescriptorSet, 0U);
ctx.Name(id, fmt::format("ssbo{}", index));
if (!desc.is_written) {
ctx.Decorate(id, spv::Decoration::NonWritable);
}
if (ctx.profile.supported_spirv >= 0x00010400) {
ctx.interfaces.push_back(id);
}
@@ -1432,6 +1435,9 @@ void EmitContext::DefineInputs(const IR::Program& program) {
}
if (info.uses_sample_id) {
sample_id = DefineInput(*this, U32[1], false, spv::BuiltIn::SampleId);
if (stage == Stage::Fragment) {
Decorate(sample_id, spv::Decoration::Flat);
}
}
if (info.uses_is_helper_invocation) {
is_helper_invocation = DefineInput(*this, U1, false, spv::BuiltIn::HelperInvocation);
@@ -1442,6 +1448,13 @@ void EmitContext::DefineInputs(const IR::Program& program) {
subgroup_mask_le = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupLeMaskKHR);
subgroup_mask_gt = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupGtMaskKHR);
subgroup_mask_ge = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupGeMaskKHR);
if (stage == Stage::Fragment) {
Decorate(subgroup_mask_eq, spv::Decoration::Flat);
Decorate(subgroup_mask_lt, spv::Decoration::Flat);
Decorate(subgroup_mask_le, spv::Decoration::Flat);
Decorate(subgroup_mask_gt, spv::Decoration::Flat);
Decorate(subgroup_mask_ge, spv::Decoration::Flat);
}
}
if (info.uses_fswzadd || info.uses_subgroup_invocation_id || info.uses_subgroup_shuffles ||
(profile.warp_size_potentially_larger_than_guest &&
@@ -1449,7 +1462,9 @@ void EmitContext::DefineInputs(const IR::Program& program) {
AddCapability(spv::Capability::GroupNonUniform);
subgroup_local_invocation_id =
DefineInput(*this, U32[1], false, spv::BuiltIn::SubgroupLocalInvocationId);
Decorate(subgroup_local_invocation_id, spv::Decoration::Flat);
if (stage == Stage::Fragment) {
Decorate(subgroup_local_invocation_id, spv::Decoration::Flat);
}
}
if (info.uses_fswzadd) {
const Id f32_one{Const(1.0f)};
@@ -1461,6 +1476,9 @@ void EmitContext::DefineInputs(const IR::Program& program) {
}
if (loads[IR::Attribute::PrimitiveId]) {
primitive_id = DefineInput(*this, U32[1], false, spv::BuiltIn::PrimitiveId);
if (stage == Stage::Fragment) {
Decorate(primitive_id, spv::Decoration::Flat);
}
}
if (loads[IR::Attribute::Layer]) {
AddCapability(spv::Capability::Geometry);
@@ -1552,17 +1570,21 @@ void EmitContext::DefineInputs(const IR::Program& program) {
if (stage != Stage::Fragment) {
continue;
}
switch (info.interpolation[index]) {
case Interpolation::Smooth:
// Default
// Decorate(id, spv::Decoration::Smooth);
break;
case Interpolation::NoPerspective:
Decorate(id, spv::Decoration::NoPerspective);
break;
case Interpolation::Flat:
const bool is_integer = input_type == AttributeType::SignedInt ||
input_type == AttributeType::UnsignedInt;
if (is_integer) {
Decorate(id, spv::Decoration::Flat);
break;
} else {
switch (info.interpolation[index]) {
case Interpolation::Smooth:
break;
case Interpolation::NoPerspective:
Decorate(id, spv::Decoration::NoPerspective);
break;
case Interpolation::Flat:
Decorate(id, spv::Decoration::Flat);
break;
}
}
}
if (stage == Stage::TessellationEval) {

View File
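
Aside (not part of the diff): the interpolation change above hinges on the Vulkan rule that integer-typed fragment inputs must use flat interpolation, so the integer check wins over whatever mode the guest requested. A minimal standalone sketch of that rule, using illustrative enum names rather than the emitter's real types:

#include <cstdio>

enum class Interpolation { Smooth, NoPerspective, Flat };
enum class AttributeType { Float, SignedInt, UnsignedInt };

// Integer fragment inputs are always decorated Flat; float inputs keep the
// requested mode (Smooth is the implicit default and needs no decoration).
const char* ChooseDecoration(Interpolation interp, AttributeType type) {
    const bool is_integer =
        type == AttributeType::SignedInt || type == AttributeType::UnsignedInt;
    if (is_integer) {
        return "Flat";
    }
    switch (interp) {
    case Interpolation::NoPerspective:
        return "NoPerspective";
    case Interpolation::Flat:
        return "Flat";
    case Interpolation::Smooth:
    default:
        return "(default smooth)";
    }
}

int main() {
    // An integer attribute asking for smooth interpolation still ends up Flat.
    std::printf("%s\n", ChooseDecoration(Interpolation::Smooth, AttributeType::SignedInt));
}
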

@@ -1705,26 +1705,21 @@ Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index,
return NULL_BINDING;
}
// xbzk: New size logic. Fixes MCI.
// If the '*' comment below ever proves wrong, the 'if' block may be removed.
const auto size = [&]() {
const bool is_nvn_cbuf = cbuf_index == 0;
// The NVN driver buffer (index 0) is known to pack the SSBO address followed by its size.
if (is_nvn_cbuf) {
// * The NVN driver buffer (index 0) is known to pack the SSBO address followed by its size.
const u64 next_qword = gpu_memory->Read<u64>(ssbo_addr + 8);
const u32 upper_32 = static_cast<u32>(next_qword >> 32);
// Hardware-based detection: GPU addresses have non-zero upper bits
if (upper_32 == 0) {
// This is a size field, not a GPU address
return static_cast<u32>(next_qword); // Return lower_32
const u32 ssbo_size = gpu_memory->Read<u32>(ssbo_addr + 8);
if (ssbo_size != 0) {
return ssbo_size;
}
}
// Fall through: either not the NVN cbuf (Doom Eternal and others), or an NVN cbuf whose ssbo_addr+8 holds a GPU address (MCI)
// Other titles (notably Doom Eternal) may use STG/LDG on buffer addresses in custom defined
// cbufs, which do not store the sizes adjacent to the addresses, so use the fully
// mapped buffer size for now.
const u32 memory_layout_size = static_cast<u32>(gpu_memory->GetMemoryLayoutSize(gpu_addr));
// Cap at 8MB to prevent allocator overflow from misinterpreted addresses
return (std::min)(memory_layout_size, static_cast<u32>(8_MiB));
}();
// Alignment only applies to the offset of the buffer
const u32 alignment = runtime.GetStorageBufferAlignment();
const GPUVAddr aligned_gpu_addr = Common::AlignDown(gpu_addr, alignment);
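
Aside (not part of the diff): a self-contained sketch of the size selection above, with stub memory accessors standing in for gpu_memory; the stub names and return values are illustrative only.

#include <algorithm>
#include <cstdint>
#include <cstdio>

constexpr std::uint32_t kFallbackCap = 8u * 1024u * 1024u; // 8 MiB cap

// Stand-ins for the real GPU memory queries used by the buffer cache.
std::uint32_t ReadU32(std::uint64_t /*addr*/) { return 0; }      // pretend the packed size is 0
std::uint32_t MappedLayoutSize(std::uint64_t /*addr*/) { return 32u * 1024u * 1024u; }

// The NVN cbuf (index 0) packs the SSBO size right after its address; trust it
// when it is non-zero, otherwise fall back to the mapped size, capped at 8 MiB
// to avoid huge allocations from a misinterpreted address.
std::uint32_t PickSsboSize(std::uint64_t ssbo_addr, std::uint64_t gpu_addr, bool is_nvn_cbuf) {
    if (is_nvn_cbuf) {
        const std::uint32_t packed_size = ReadU32(ssbo_addr + 8);
        if (packed_size != 0) {
            return packed_size;
        }
    }
    return (std::min)(MappedLayoutSize(gpu_addr), kFallbackCap);
}

int main() {
    std::printf("size=%u\n", PickSsboSize(0x1000, 0x1000, true)); // prints the 8 MiB cap
}
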

View File

@@ -81,8 +81,7 @@ public:
if constexpr (can_async_check) {
guard.lock();
}
// if ((Settings::IsGPULevelLow() || Settings::IsGPULevelMedium()) && !should_flush) {
if (false) {
if (Settings::IsGPULevelLow() || (Settings::IsGPULevelMedium() && !should_flush)) {
func();
} else {
uncommitted_operations.emplace_back(std::move(func));

View File

@@ -107,19 +107,9 @@ void Vic::Execute() {
auto output_height{config.output_surface_config.out_surface_height + 1};
output_surface.resize_destructive(output_width * output_height);
// Initialize the surface with the appropriate black pixel
Pixel black_pixel{};
if (config.output_surface_config.out_pixel_format == VideoPixelFormat::Y8__V8U8_N420) {
// Y=0, U=512, V=512 (10-bit), A=0
black_pixel = {0, 512, 512, 0};
} else {
// R=0, G=0, B=0, A=0
black_pixel = {0, 0, 0, 0};
}
std::fill(output_surface.begin(), output_surface.end(), black_pixel);
if (Settings::values.nvdec_emulation.GetValue() == Settings::NvdecEmulation::Off) [[unlikely]] {
// Fill the frame with black, as it can otherwise contain random data and look very glitchy.
std::fill(output_surface.begin(), output_surface.end(), Pixel{});
} else {
for (size_t i = 0; i < config.slot_structs.size(); i++) {
auto& slot_config{config.slot_structs[i]};
@@ -132,18 +122,7 @@ void Vic::Execute() {
nvdec_id = frame_queue.VicFindNvdecFdFromOffset(luma_offset);
}
auto frame = frame_queue.GetFrame(nvdec_id, luma_offset);
if (!frame) {
// We might've failed to find the frame, or the nvdec id is stale/wrong.
// Try to find the nvdec id again.
const s32 new_id = frame_queue.VicFindNvdecFdFromOffset(luma_offset);
if (new_id != -1) {
nvdec_id = new_id;
frame = frame_queue.GetFrame(nvdec_id, luma_offset);
}
}
if (frame) {
if (auto frame = frame_queue.GetFrame(nvdec_id, luma_offset); frame) {
if (frame.get()) {
switch (frame->GetPixelFormat()) {
case AV_PIX_FMT_YUV420P:
@@ -191,7 +170,6 @@ void Vic::ReadProgressiveY8__V8U8_N420(const SlotStruct& slot, std::span<const P
}
slot_surface.resize_destructive(out_luma_width * out_luma_height);
std::fill(slot_surface.begin(), slot_surface.end(), Pixel{0, 512, 512, 0});
const auto in_luma_width{(std::min)(frame->GetWidth(), s32(out_luma_width))};
const auto in_luma_height{(std::min)(frame->GetHeight(), s32(out_luma_height))};
@@ -241,7 +219,6 @@ void Vic::ReadInterlacedY8__V8U8_N420(const SlotStruct& slot, std::span<const Pl
const auto out_luma_stride{out_luma_width};
slot_surface.resize_destructive(out_luma_width * out_luma_height);
std::fill(slot_surface.begin(), slot_surface.end(), Pixel{0, 512, 512, 0});
const auto in_luma_width{(std::min)(frame->GetWidth(), s32(out_luma_width))};
[[maybe_unused]] const auto in_luma_height{

View File

@@ -33,8 +33,6 @@ static OGLProgram LinkSeparableProgram(GLuint shader) {
glGetProgramInfoLog(program.handle, log_length, nullptr, log.data());
if (link_status == GL_FALSE) {
LOG_ERROR(Render_OpenGL, "{}", log);
glDeleteProgram(program.handle);
program.handle = 0;
} else {
LOG_WARNING(Render_OpenGL, "{}", log);
}

View File

@@ -116,14 +116,9 @@ void WindowAdaptPass::DrawToFramebuffer(ProgramManager& program_manager, std::li
glBindTextureUnit(0, textures[i]);
glProgramUniformMatrix3x2fv(vert.handle, ModelViewMatrixLocation, 1, GL_FALSE,
matrices[i].data());
if (frag.handle != 0) {
const GLint screen_size_loc = glGetUniformLocation(frag.handle, "screen_size");
if (screen_size_loc != -1) {
glProgramUniform2ui(frag.handle, screen_size_loc,
static_cast<GLuint>(layout.screen.GetWidth()),
static_cast<GLuint>(layout.screen.GetHeight()));
}
}
glProgramUniform2ui(frag.handle, ScreenSizeLocation,
static_cast<GLuint>(layout.screen.GetWidth()),
static_cast<GLuint>(layout.screen.GetHeight()));
glNamedBufferSubData(vertex_buffer.handle, 0, sizeof(vertices[i]), std::data(vertices[i]));
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}

View File

@@ -525,18 +525,24 @@ BlitImageHelper::BlitImageHelper(const Device& device_, Scheduler& scheduler_,
nullptr, PUSH_CONSTANT_RANGE<VK_SHADER_STAGE_FRAGMENT_BIT, sizeof(float) * 4>))),
full_screen_vert(BuildShader(device, FULL_SCREEN_TRIANGLE_VERT_SPV)),
blit_color_to_color_frag(BuildShader(device, BLIT_COLOR_FLOAT_FRAG_SPV)),
blit_depth_stencil_frag(BuildShader(device, VULKAN_BLIT_DEPTH_STENCIL_FRAG_SPV)),
blit_depth_stencil_frag(device.IsExtShaderStencilExportSupported()
? BuildShader(device, VULKAN_BLIT_DEPTH_STENCIL_FRAG_SPV)
: vk::ShaderModule{}),
clear_color_vert(BuildShader(device, VULKAN_COLOR_CLEAR_VERT_SPV)),
clear_color_frag(BuildShader(device, VULKAN_COLOR_CLEAR_FRAG_SPV)),
clear_stencil_frag(BuildShader(device, VULKAN_DEPTHSTENCIL_CLEAR_FRAG_SPV)),
convert_depth_to_float_frag(BuildShader(device, CONVERT_DEPTH_TO_FLOAT_FRAG_SPV)),
convert_float_to_depth_frag(BuildShader(device, CONVERT_FLOAT_TO_DEPTH_FRAG_SPV)),
convert_abgr8_to_d24s8_frag(BuildShader(device, CONVERT_ABGR8_TO_D24S8_FRAG_SPV)),
convert_abgr8_to_d24s8_frag(device.IsExtShaderStencilExportSupported()
? BuildShader(device, CONVERT_ABGR8_TO_D24S8_FRAG_SPV)
: vk::ShaderModule{}),
convert_abgr8_to_d32f_frag(BuildShader(device, CONVERT_ABGR8_TO_D32F_FRAG_SPV)),
convert_d32f_to_abgr8_frag(BuildShader(device, CONVERT_D32F_TO_ABGR8_FRAG_SPV)),
convert_d24s8_to_abgr8_frag(BuildShader(device, CONVERT_D24S8_TO_ABGR8_FRAG_SPV)),
convert_s8d24_to_abgr8_frag(BuildShader(device, CONVERT_S8D24_TO_ABGR8_FRAG_SPV)),
convert_abgr8_srgb_to_d24s8_frag(BuildShader(device, CONVERT_ABGR8_SRGB_TO_D24S8_FRAG_SPV)),
convert_abgr8_srgb_to_d24s8_frag(device.IsExtShaderStencilExportSupported()
? BuildShader(device, CONVERT_ABGR8_SRGB_TO_D24S8_FRAG_SPV)
: vk::ShaderModule{}),
convert_rgba_to_bgra_frag(BuildShader(device, CONVERT_RGBA8_TO_BGRA8_FRAG_SPV)),
convert_yuv420_to_rgb_comp(BuildShader(device, CONVERT_YUV420_TO_RGB_COMP_SPV)),
convert_rgb_to_yuv420_comp(BuildShader(device, CONVERT_RGB_TO_YUV420_COMP_SPV)),
@@ -667,6 +673,11 @@ void BlitImageHelper::ConvertR16ToD16(const Framebuffer* dst_framebuffer,
void BlitImageHelper::ConvertABGR8ToD24S8(const Framebuffer* dst_framebuffer,
const ImageView& src_image_view) {
if (!device.IsExtShaderStencilExportSupported()) {
// Shader requires VK_EXT_shader_stencil_export which is not available
LOG_WARNING(Render_Vulkan, "ConvertABGR8ToD24S8 requires shader_stencil_export, skipping");
return;
}
ConvertPipelineDepthTargetEx(convert_abgr8_to_d24s8_pipeline, dst_framebuffer->RenderPass(),
convert_abgr8_to_d24s8_frag);
Convert(*convert_abgr8_to_d24s8_pipeline, dst_framebuffer, src_image_view);
@@ -702,6 +713,11 @@ void BlitImageHelper::ConvertS8D24ToABGR8(const Framebuffer* dst_framebuffer,
void BlitImageHelper::ConvertABGR8SRGBToD24S8(const Framebuffer* dst_framebuffer,
const ImageView& src_image_view) {
if (!device.IsExtShaderStencilExportSupported()) {
// Shader requires VK_EXT_shader_stencil_export which is not available
LOG_WARNING(Render_Vulkan, "ConvertABGR8SRGBToD24S8 requires shader_stencil_export, skipping");
return;
}
ConvertPipelineDepthTargetEx(convert_abgr8_srgb_to_d24s8_pipeline,
dst_framebuffer->RenderPass(),
convert_abgr8_srgb_to_d24s8_frag);

View File

@@ -59,7 +59,7 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d, DynamicFe
raw1 = 0;
extended_dynamic_state.Assign(features.has_extended_dynamic_state ? 1 : 0);
extended_dynamic_state_2.Assign(features.has_extended_dynamic_state_2 ? 1 : 0);
extended_dynamic_state_2_extra.Assign(features.has_extended_dynamic_state_2_extra ? 1 : 0);
extended_dynamic_state_2_logic_op.Assign(features.has_extended_dynamic_state_2_logic_op ? 1 : 0);
extended_dynamic_state_3_blend.Assign(features.has_extended_dynamic_state_3_blend ? 1 : 0);
extended_dynamic_state_3_enables.Assign(features.has_extended_dynamic_state_3_enables ? 1 : 0);
dynamic_vertex_input.Assign(features.has_dynamic_vertex_input ? 1 : 0);
@@ -157,7 +157,7 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d, DynamicFe
return static_cast<u16>(array.stride.Value());
});
}
if (!extended_dynamic_state_2_extra) {
if (!extended_dynamic_state_2_logic_op) {
dynamic_state.Refresh2(regs, topology_, extended_dynamic_state_2);
}
if (!extended_dynamic_state_3_blend) {

View File

@@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -20,7 +23,8 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs;
struct DynamicFeatures {
bool has_extended_dynamic_state;
bool has_extended_dynamic_state_2;
bool has_extended_dynamic_state_2_extra;
bool has_extended_dynamic_state_2_logic_op;
bool has_extended_dynamic_state_2_patch_control_points;
bool has_extended_dynamic_state_3_blend;
bool has_extended_dynamic_state_3_enables;
bool has_dynamic_vertex_input;
@@ -186,7 +190,7 @@ struct FixedPipelineState {
u32 raw1;
BitField<0, 1, u32> extended_dynamic_state;
BitField<1, 1, u32> extended_dynamic_state_2;
BitField<2, 1, u32> extended_dynamic_state_2_extra;
BitField<2, 1, u32> extended_dynamic_state_2_logic_op;
BitField<3, 1, u32> extended_dynamic_state_3_blend;
BitField<4, 1, u32> extended_dynamic_state_3_enables;
BitField<5, 1, u32> dynamic_vertex_input;

View File

@@ -583,7 +583,9 @@ void BufferCacheRuntime::BindVertexBuffer(u32 index, VkBuffer buffer, u32 offset
if (index >= device.GetMaxVertexInputBindings()) {
return;
}
if (device.IsExtExtendedDynamicStateSupported()) {
// Use BindVertexBuffers2EXT only if EDS1 is supported AND VIDS is not active
// When VIDS is active, the pipeline doesn't declare VERTEX_INPUT_BINDING_STRIDE as dynamic
if (device.IsExtExtendedDynamicStateSupported() && !device.IsExtVertexInputDynamicStateSupported()) {
scheduler.Record([index, buffer, offset, size, stride](vk::CommandBuffer cmdbuf) {
const VkDeviceSize vk_offset = buffer != VK_NULL_HANDLE ? offset : 0;
const VkDeviceSize vk_size = buffer != VK_NULL_HANDLE ? size : VK_WHOLE_SIZE;
@@ -623,7 +625,8 @@ void BufferCacheRuntime::BindVertexBuffers(VideoCommon::HostBindings<Buffer>& bi
if (binding_count == 0) {
return;
}
if (device.IsExtExtendedDynamicStateSupported()) {
// Use BindVertexBuffers2EXT only if EDS1 is supported AND VIDS is not active
if (device.IsExtExtendedDynamicStateSupported() && !device.IsExtVertexInputDynamicStateSupported()) {
scheduler.Record([bindings_ = std::move(bindings),
buffer_handles_ = std::move(buffer_handles),
binding_count](vk::CommandBuffer cmdbuf) {

View File

@@ -418,6 +418,9 @@ ConditionalRenderingResolvePass::ConditionalRenderingResolvePass(
void ConditionalRenderingResolvePass::Resolve(VkBuffer dst_buffer, VkBuffer src_buffer,
u32 src_offset, bool compare_to_zero) {
if (!device.IsExtConditionalRendering()) {
return;
}
const size_t compare_size = compare_to_zero ? 8 : 24;
compute_pass_descriptor_queue.Acquire();
@@ -448,7 +451,7 @@ void ConditionalRenderingResolvePass::Resolve(VkBuffer dst_buffer, VkBuffer src_
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
cmdbuf.Dispatch(1, 1, 1);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, 0, write_barrier);
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, write_barrier);
});
}
@@ -470,6 +473,14 @@ QueriesPrefixScanPass::QueriesPrefixScanPass(
void QueriesPrefixScanPass::Run(VkBuffer accumulation_buffer, VkBuffer dst_buffer,
VkBuffer src_buffer, size_t number_of_sums,
size_t min_accumulation_limit, size_t max_accumulation_limit) {
constexpr VkAccessFlags BASE_DST_ACCESS = VK_ACCESS_SHADER_READ_BIT |
VK_ACCESS_TRANSFER_READ_BIT |
VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
VK_ACCESS_INDEX_READ_BIT |
VK_ACCESS_UNIFORM_READ_BIT;
const VkAccessFlags conditional_access =
device.IsExtConditionalRendering() ? VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT : 0;
size_t current_runs = number_of_sums;
size_t offset = 0;
while (current_runs != 0) {
@@ -486,22 +497,18 @@ void QueriesPrefixScanPass::Run(VkBuffer accumulation_buffer, VkBuffer dst_buffe
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([this, descriptor_data, min_accumulation_limit, max_accumulation_limit,
runs_to_do, used_offset](vk::CommandBuffer cmdbuf) {
runs_to_do, used_offset, conditional_access](vk::CommandBuffer cmdbuf) {
static constexpr VkMemoryBarrier read_barrier{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
};
static constexpr VkMemoryBarrier write_barrier{
const VkMemoryBarrier write_barrier{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT |
VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT |
VK_ACCESS_UNIFORM_READ_BIT |
VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT,
.dstAccessMask = BASE_DST_ACCESS | conditional_access,
};
const QueriesPrefixScanPushConstants uniforms{
.min_accumulation_base = static_cast<u32>(min_accumulation_limit),
@@ -519,8 +526,7 @@ void QueriesPrefixScanPass::Run(VkBuffer accumulation_buffer, VkBuffer dst_buffe
cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
cmdbuf.Dispatch(1, 1, 1);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, 0,
write_barrier);
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, write_barrier);
});
}
}

View File

@@ -263,6 +263,7 @@ GraphicsPipeline::GraphicsPipeline(
std::ranges::copy(info->constant_buffer_used_sizes, uniform_buffer_sizes[stage].begin());
num_textures += Shader::NumDescriptors(info->texture_descriptors);
}
fragment_has_color0_output = stage_infos[NUM_STAGES - 1].stores_frag_color[0];
auto func{[this, shader_notify, &render_pass_cache, &descriptor_pool, pipeline_statistics] {
DescriptorLayoutBuilder builder{MakeBuilder(device, stage_infos)};
uses_push_descriptor = builder.CanUsePushDescriptor();
@@ -702,13 +703,18 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
.lineWidth = 1.0f,
// TODO(alekpop): Transfer from regs
};
const bool smooth_lines_supported =
device.IsExtLineRasterizationSupported() && device.SupportsSmoothLines();
const bool stippled_lines_supported =
device.IsExtLineRasterizationSupported() && device.SupportsStippledRectangularLines();
VkPipelineRasterizationLineStateCreateInfoEXT line_state{
.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT,
.pNext = nullptr,
.lineRasterizationMode = key.state.smooth_lines != 0
.lineRasterizationMode = key.state.smooth_lines != 0 && smooth_lines_supported
? VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT
: VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT,
.stippledLineEnable = dynamic.line_stipple_enable ? VK_TRUE : VK_FALSE,
.stippledLineEnable =
(dynamic.line_stipple_enable && stippled_lines_supported) ? VK_TRUE : VK_FALSE,
.lineStippleFactor = key.state.line_stipple_factor,
.lineStipplePattern = static_cast<uint16_t>(key.state.line_stipple_pattern),
};
@@ -739,6 +745,8 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
provoking_vertex.pNext = std::exchange(rasterization_ci.pNext, &provoking_vertex);
}
const bool supports_alpha_output = fragment_has_color0_output;
const bool alpha_to_one_supported = device.SupportsAlphaToOne();
const VkPipelineMultisampleStateCreateInfo multisample_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
.pNext = nullptr,
@@ -747,8 +755,10 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
.sampleShadingEnable = Settings::values.sample_shading.GetValue() ? VK_TRUE : VK_FALSE,
.minSampleShading = static_cast<float>(Settings::values.sample_shading_fraction.GetValue()) / 100.0f,
.pSampleMask = nullptr,
.alphaToCoverageEnable = key.state.alpha_to_coverage_enabled != 0 ? VK_TRUE : VK_FALSE,
.alphaToOneEnable = key.state.alpha_to_one_enabled != 0 ? VK_TRUE : VK_FALSE,
.alphaToCoverageEnable =
supports_alpha_output && key.state.alpha_to_coverage_enabled != 0 ? VK_TRUE : VK_FALSE,
.alphaToOneEnable = supports_alpha_output && alpha_to_one_supported &&
key.state.alpha_to_one_enabled != 0 ? VK_TRUE : VK_FALSE,
};
const VkPipelineDepthStencilStateCreateInfo depth_stencil_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
@@ -806,14 +816,25 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
.blendConstants = {}
};
static_vector<VkDynamicState, 34> dynamic_states{
VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
VK_DYNAMIC_STATE_DEPTH_BIAS, VK_DYNAMIC_STATE_BLEND_CONSTANTS,
VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
VK_DYNAMIC_STATE_VIEWPORT,
VK_DYNAMIC_STATE_SCISSOR,
VK_DYNAMIC_STATE_DEPTH_BIAS,
VK_DYNAMIC_STATE_LINE_WIDTH,
};
if (device.UsesAdvancedCoreDynamicState()) {
static constexpr std::array core_dynamic_states{
VK_DYNAMIC_STATE_BLEND_CONSTANTS,
VK_DYNAMIC_STATE_DEPTH_BOUNDS,
VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
VK_DYNAMIC_STATE_STENCIL_REFERENCE,
};
dynamic_states.insert(dynamic_states.end(), core_dynamic_states.begin(),
core_dynamic_states.end());
}
if (key.state.extended_dynamic_state) {
std::vector<VkDynamicState> extended{
static constexpr std::array extended{
VK_DYNAMIC_STATE_CULL_MODE_EXT,
VK_DYNAMIC_STATE_FRONT_FACE_EXT,
VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT,
@@ -823,51 +844,68 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT,
VK_DYNAMIC_STATE_STENCIL_OP_EXT,
};
if (!device.IsExtVertexInputDynamicStateSupported()) {
extended.push_back(VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT);
}
if (key.state.dynamic_vertex_input) {
dynamic_states.push_back(VK_DYNAMIC_STATE_VERTEX_INPUT_EXT);
}
dynamic_states.insert(dynamic_states.end(), extended.begin(), extended.end());
if (key.state.extended_dynamic_state_2) {
static constexpr std::array extended2{
VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT,
VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT,
VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT,
};
dynamic_states.insert(dynamic_states.end(), extended2.begin(), extended2.end());
// VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT is part of EDS1
// Only use it if VIDS is not active (VIDS replaces it with full vertex input control)
if (!key.state.dynamic_vertex_input) {
dynamic_states.push_back(VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT);
}
if (key.state.extended_dynamic_state_2_extra) {
dynamic_states.push_back(VK_DYNAMIC_STATE_LOGIC_OP_EXT);
}
// VK_DYNAMIC_STATE_VERTEX_INPUT_EXT (VIDS) - Independent from EDS
// Provides full dynamic vertex input control, replaces VERTEX_INPUT_BINDING_STRIDE
if (key.state.dynamic_vertex_input) {
dynamic_states.push_back(VK_DYNAMIC_STATE_VERTEX_INPUT_EXT);
}
// EDS2 - Core (3 states)
if (key.state.extended_dynamic_state_2) {
static constexpr std::array extended2{
VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT,
VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT,
VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT,
};
dynamic_states.insert(dynamic_states.end(), extended2.begin(), extended2.end());
}
// EDS2 - LogicOp (granular)
if (key.state.extended_dynamic_state_2_logic_op) {
dynamic_states.push_back(VK_DYNAMIC_STATE_LOGIC_OP_EXT);
}
// EDS3 - Blending (composite: 3 states)
if (key.state.extended_dynamic_state_3_blend) {
static constexpr std::array extended3{
VK_DYNAMIC_STATE_COLOR_BLEND_ENABLE_EXT,
VK_DYNAMIC_STATE_COLOR_BLEND_EQUATION_EXT,
VK_DYNAMIC_STATE_COLOR_WRITE_MASK_EXT,
};
dynamic_states.insert(dynamic_states.end(), extended3.begin(), extended3.end());
}
// EDS3 - Enables (composite: per-feature)
if (key.state.extended_dynamic_state_3_enables) {
if (device.SupportsDynamicState3DepthClampEnable()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_DEPTH_CLAMP_ENABLE_EXT);
}
if (key.state.extended_dynamic_state_3_blend) {
static constexpr std::array extended3{
VK_DYNAMIC_STATE_COLOR_BLEND_ENABLE_EXT,
VK_DYNAMIC_STATE_COLOR_BLEND_EQUATION_EXT,
VK_DYNAMIC_STATE_COLOR_WRITE_MASK_EXT,
// VK_DYNAMIC_STATE_COLOR_BLEND_ADVANCED_EXT,
};
dynamic_states.insert(dynamic_states.end(), extended3.begin(), extended3.end());
if (device.SupportsDynamicState3LogicOpEnable()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_LOGIC_OP_ENABLE_EXT);
}
if (key.state.extended_dynamic_state_3_enables) {
static constexpr std::array extended3{
VK_DYNAMIC_STATE_DEPTH_CLAMP_ENABLE_EXT,
VK_DYNAMIC_STATE_LOGIC_OP_ENABLE_EXT,
// additional state3 extensions
VK_DYNAMIC_STATE_LINE_RASTERIZATION_MODE_EXT,
VK_DYNAMIC_STATE_CONSERVATIVE_RASTERIZATION_MODE_EXT,
VK_DYNAMIC_STATE_LINE_STIPPLE_ENABLE_EXT,
VK_DYNAMIC_STATE_ALPHA_TO_COVERAGE_ENABLE_EXT,
VK_DYNAMIC_STATE_ALPHA_TO_ONE_ENABLE_EXT,
VK_DYNAMIC_STATE_DEPTH_CLIP_ENABLE_EXT,
VK_DYNAMIC_STATE_PROVOKING_VERTEX_MODE_EXT,
};
dynamic_states.insert(dynamic_states.end(), extended3.begin(), extended3.end());
if (device.SupportsDynamicState3LineRasterizationMode()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_LINE_RASTERIZATION_MODE_EXT);
}
if (device.SupportsDynamicState3ConservativeRasterizationMode()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_CONSERVATIVE_RASTERIZATION_MODE_EXT);
}
if (device.SupportsDynamicState3LineStippleEnable()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_LINE_STIPPLE_ENABLE_EXT);
}
if (device.SupportsDynamicState3AlphaToCoverageEnable()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_ALPHA_TO_COVERAGE_ENABLE_EXT);
}
if (device.SupportsDynamicState3AlphaToOneEnable()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_ALPHA_TO_ONE_ENABLE_EXT);
}
}

View File

@@ -82,6 +82,17 @@ public:
const std::array<const Shader::Info*, NUM_STAGES>& infos);
bool HasDynamicVertexInput() const noexcept { return key.state.dynamic_vertex_input; }
bool SupportsAlphaToCoverage() const noexcept {
return fragment_has_color0_output;
}
bool SupportsAlphaToOne() const noexcept {
return fragment_has_color0_output;
}
bool UsesExtendedDynamicState() const noexcept {
return key.state.extended_dynamic_state != 0;
}
GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete;
GraphicsPipeline(GraphicsPipeline&&) noexcept = delete;
@@ -149,6 +160,7 @@ private:
std::array<u32, 5> enabled_uniform_buffer_masks{};
VideoCommon::UniformBufferSizes uniform_buffer_sizes{};
u32 num_textures{};
bool fragment_has_color0_output{};
vk::DescriptorSetLayout descriptor_set_layout;
DescriptorAllocator descriptor_allocator;

View File

@@ -146,7 +146,8 @@ Shader::AttributeType AttributeType(const FixedPipelineState& state, size_t inde
Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> programs,
const GraphicsPipelineCacheKey& key,
const Shader::IR::Program& program,
const Shader::IR::Program* previous_program) {
const Shader::IR::Program* previous_program,
const Vulkan::Device& device) {
Shader::RuntimeInfo info;
if (previous_program) {
info.previous_stage_stores = previous_program->info.stores;
@@ -168,10 +169,14 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
info.fixed_state_point_size = point_size;
}
if (key.state.xfb_enabled) {
auto [varyings, count] =
VideoCommon::MakeTransformFeedbackVaryings(key.state.xfb_state);
info.xfb_varyings = varyings;
info.xfb_count = count;
if (device.IsExtTransformFeedbackSupported()) {
auto [varyings, count] =
VideoCommon::MakeTransformFeedbackVaryings(key.state.xfb_state);
info.xfb_varyings = varyings;
info.xfb_count = count;
} else {
LOG_WARNING(Render_Vulkan, "XFB requested in pipeline key but device lacks VK_EXT_transform_feedback; ignoring XFB decorations");
}
}
info.convert_depth_mode = gl_ndc;
}
@@ -218,10 +223,14 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
info.fixed_state_point_size = point_size;
}
if (key.state.xfb_enabled != 0) {
auto [varyings, count] =
VideoCommon::MakeTransformFeedbackVaryings(key.state.xfb_state);
info.xfb_varyings = varyings;
info.xfb_count = count;
if (device.IsExtTransformFeedbackSupported()) {
auto [varyings, count] =
VideoCommon::MakeTransformFeedbackVaryings(key.state.xfb_state);
info.xfb_varyings = varyings;
info.xfb_count = count;
} else {
LOG_WARNING(Render_Vulkan, "XFB requested in pipeline key but device lacks VK_EXT_transform_feedback; ignoring XFB decorations");
}
}
info.convert_depth_mode = gl_ndc;
break;
@@ -404,14 +413,35 @@ PipelineCache::PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
device.GetMaxVertexInputBindings(), Maxwell::NumVertexArrays);
}
dynamic_features = DynamicFeatures{
.has_extended_dynamic_state = device.IsExtExtendedDynamicStateSupported(),
.has_extended_dynamic_state_2 = device.IsExtExtendedDynamicState2Supported(),
.has_extended_dynamic_state_2_extra = device.IsExtExtendedDynamicState2ExtrasSupported(),
.has_extended_dynamic_state_3_blend = device.IsExtExtendedDynamicState3BlendingSupported(),
.has_extended_dynamic_state_3_enables = device.IsExtExtendedDynamicState3EnablesSupported(),
.has_dynamic_vertex_input = device.IsExtVertexInputDynamicStateSupported(),
};
LOG_INFO(Render_Vulkan, "DynamicState setting value: {}", Settings::values.dyna_state.GetValue());
dynamic_features = {};
// User-selected granularity is enforced in the vulkan_device.cpp switch statement:
// Level 0: Core Dynamic States only
// Level 1: Core + EDS1
// Level 2: Core + EDS1 + EDS2 (accumulative)
// Level 3: Core + EDS1 + EDS2 + EDS3 (accumulative)
// Here we only verify if extensions were successfully loaded by the device
dynamic_features.has_extended_dynamic_state =
device.IsExtExtendedDynamicStateSupported();
dynamic_features.has_extended_dynamic_state_2 =
device.IsExtExtendedDynamicState2Supported();
dynamic_features.has_extended_dynamic_state_2_logic_op =
device.IsExtExtendedDynamicState2ExtrasSupported();
dynamic_features.has_extended_dynamic_state_2_patch_control_points = false;
dynamic_features.has_extended_dynamic_state_3_blend =
device.IsExtExtendedDynamicState3BlendingSupported();
dynamic_features.has_extended_dynamic_state_3_enables =
device.IsExtExtendedDynamicState3EnablesSupported();
// VIDS: Independent toggle (not affected by dyna_state levels)
dynamic_features.has_dynamic_vertex_input =
device.IsExtVertexInputDynamicStateSupported() &&
Settings::values.vertex_input_dynamic_state.GetValue();
}
PipelineCache::~PipelineCache() {
@@ -516,8 +546,8 @@ void PipelineCache::LoadDiskResources(u64 title_id, std::stop_token stop_loading
dynamic_features.has_extended_dynamic_state ||
(key.state.extended_dynamic_state_2 != 0) !=
dynamic_features.has_extended_dynamic_state_2 ||
(key.state.extended_dynamic_state_2_extra != 0) !=
dynamic_features.has_extended_dynamic_state_2_extra ||
(key.state.extended_dynamic_state_2_logic_op != 0) !=
dynamic_features.has_extended_dynamic_state_2_logic_op ||
(key.state.extended_dynamic_state_3_blend != 0) !=
dynamic_features.has_extended_dynamic_state_3_blend ||
(key.state.extended_dynamic_state_3_enables != 0) !=
@@ -671,7 +701,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
const size_t stage_index{index - 1};
infos[stage_index] = &program.info;
const auto runtime_info{MakeRuntimeInfo(programs, key, program, previous_stage)};
const auto runtime_info{MakeRuntimeInfo(programs, key, program, previous_stage, device)};
ConvertLegacyToGeneric(program, runtime_info);
const std::vector<u32> code{EmitSPIRV(profile, runtime_info, program, binding, this->optimize_spirv_output)};
device.SaveShader(code);
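
Aside (not part of the diff): the accumulative level scheme described in the comments above can be summarized in isolation. The struct and function below are illustrative only; the real gating lives in the vulkan_device.cpp switch statement.

#include <cstdint>
#include <cstdio>

struct DynamicStateTiers {
    bool eds1;
    bool eds2;
    bool eds3;
};

// Level 0 keeps only core dynamic states; each higher level adds one EDS tier.
DynamicStateTiers TiersForLevel(std::uint32_t dyna_state_level) {
    return DynamicStateTiers{
        .eds1 = dyna_state_level >= 1, // Level 1: Core + EDS1
        .eds2 = dyna_state_level >= 2, // Level 2: adds EDS2
        .eds3 = dyna_state_level >= 3, // Level 3: adds EDS3
    };
}

int main() {
    const DynamicStateTiers tiers = TiersForLevel(2);
    std::printf("EDS1=%d EDS2=%d EDS3=%d\n", tiers.eds1, tiers.eds2, tiers.eds3); // 1 1 0
}
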

View File

@@ -872,17 +872,18 @@ private:
return;
}
has_flushed_end_pending = true;
if (!has_started || buffers_count == 0) {
// Refresh buffer state before beginning transform feedback so counters are up-to-date
UpdateBuffers();
if (buffers_count == 0) {
// No counter buffers available: begin without counters
scheduler.Record([](vk::CommandBuffer cmdbuf) {
cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr);
});
UpdateBuffers();
return;
}
scheduler.Record([this, total = static_cast<u32>(buffers_count)](vk::CommandBuffer cmdbuf) {
cmdbuf.BeginTransformFeedbackEXT(0, total, counter_buffers.data(), offsets.data());
});
UpdateBuffers();
}
void FlushEndTFB() {
@@ -892,11 +893,15 @@ private:
}
has_flushed_end_pending = false;
// Refresh buffer state before ending transform feedback to ensure buffers_count is up-to-date.
UpdateBuffers();
if (buffers_count == 0) {
LOG_DEBUG(Render_Vulkan, "EndTransformFeedbackEXT called with no counters (buffers_count=0)");
scheduler.Record([](vk::CommandBuffer cmdbuf) {
cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr);
});
} else {
LOG_DEBUG(Render_Vulkan, "EndTransformFeedbackEXT called with counters (buffers_count={})", buffers_count);
scheduler.Record([this,
total = static_cast<u32>(buffers_count)](vk::CommandBuffer cmdbuf) {
cmdbuf.EndTransformFeedbackEXT(0, total, counter_buffers.data(), offsets.data());
@@ -907,6 +912,7 @@ private:
void UpdateBuffers() {
last_queries.fill(0);
last_queries_stride.fill(1);
streams_mask = 0; // reset previously recorded streams
runtime.View3DRegs([this](Maxwell3D& maxwell3d) {
buffers_count = 0;
out_topology = maxwell3d.draw_manager->GetDrawState().topology;
@@ -916,6 +922,10 @@ private:
continue;
}
const size_t stream = tf.controls[i].stream;
if (stream >= last_queries_stride.size()) {
LOG_WARNING(Render_Vulkan, "TransformFeedback stream {} out of range", stream);
continue;
}
last_queries_stride[stream] = tf.controls[i].stride;
streams_mask |= 1ULL << stream;
buffers_count = std::max<size_t>(buffers_count, stream + 1);
@@ -1116,16 +1126,21 @@ public:
query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced;
u64 num_vertices = 0;
// Protect against stride == 0 (avoid divide-by-zero). Use fallback stride=1 and warn.
u64 safe_stride = query->stride == 0 ? 1 : query->stride;
if (query->stride == 0) {
LOG_WARNING(Render_Vulkan, "TransformFeedback query has stride 0; using 1 to avoid div-by-zero (addr=0x{:x})", query->dependant_address);
}
if (query->dependant_manage) {
auto* dependant_query = tfb_streamer.GetQuery(query->dependant_index);
num_vertices = dependant_query->value / query->stride;
num_vertices = dependant_query->value / safe_stride;
tfb_streamer.Free(query->dependant_index);
} else {
u8* pointer = device_memory.GetPointer<u8>(query->dependant_address);
if (pointer != nullptr) {
u32 result;
std::memcpy(&result, pointer, sizeof(u32));
num_vertices = static_cast<u64>(result) / query->stride;
num_vertices = static_cast<u64>(result) / safe_stride;
}
}
query->value = [&]() -> u64 {

View File

@@ -197,6 +197,11 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
wfi_event(device.GetLogical().CreateEvent()) {
scheduler.SetQueryCache(query_cache);
// Log multi-draw support
if (device.IsExtMultiDrawSupported()) {
LOG_INFO(Render_Vulkan, "VK_EXT_multi_draw is enabled for optimized draw calls");
}
}
RasterizerVulkan::~RasterizerVulkan() = default;
@@ -234,16 +239,44 @@ void RasterizerVulkan::Draw(bool is_indexed, u32 instance_count) {
const auto& draw_state = maxwell3d->draw_manager->GetDrawState();
const u32 num_instances{instance_count};
const DrawParams draw_params{MakeDrawParams(draw_state, num_instances, is_indexed)};
scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) {
if (draw_params.is_indexed) {
cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances,
draw_params.first_index, draw_params.base_vertex,
draw_params.base_instance);
} else {
cmdbuf.Draw(draw_params.num_vertices, draw_params.num_instances,
draw_params.base_vertex, draw_params.base_instance);
}
});
// Use VK_EXT_multi_draw if available (single draw becomes multi-draw with count=1)
if (device.IsExtMultiDrawSupported()) {
scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) {
if (draw_params.is_indexed) {
// Use multi-draw indexed with single draw
const VkMultiDrawIndexedInfoEXT multi_draw_info{
.firstIndex = draw_params.first_index,
.indexCount = draw_params.num_vertices,
};
const int32_t vertex_offset = static_cast<int32_t>(draw_params.base_vertex);
cmdbuf.DrawMultiIndexedEXT(1, &multi_draw_info, draw_params.num_instances,
draw_params.base_instance,
sizeof(VkMultiDrawIndexedInfoEXT), &vertex_offset);
} else {
// Use multi-draw with single draw
const VkMultiDrawInfoEXT multi_draw_info{
.firstVertex = draw_params.base_vertex,
.vertexCount = draw_params.num_vertices,
};
cmdbuf.DrawMultiEXT(1, &multi_draw_info, draw_params.num_instances,
draw_params.base_instance,
sizeof(VkMultiDrawInfoEXT));
}
});
} else {
// Fall back to standard draw calls
scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) {
if (draw_params.is_indexed) {
cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances,
draw_params.first_index, draw_params.base_vertex,
draw_params.base_instance);
} else {
cmdbuf.Draw(draw_params.num_vertices, draw_params.num_instances,
draw_params.base_vertex, draw_params.base_instance);
}
});
}
});
}
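
Aside (not part of the diff): the single-draw-as-multi-draw mapping above, reduced to its essentials. This assumes a Vulkan header that exposes VK_EXT_multi_draw and a vkCmdDrawMultiIndexedEXT pointer already loaded through vkGetDeviceProcAddr; it is a sketch, not the renderer's actual command-buffer wrapper.

#include <cstdint>
#include <vulkan/vulkan_core.h>

// A single indexed draw expressed as a multi-draw with drawCount = 1.
void SingleDrawViaMultiDraw(PFN_vkCmdDrawMultiIndexedEXT draw_multi_indexed,
                            VkCommandBuffer cmdbuf, std::uint32_t index_count,
                            std::uint32_t first_index, std::int32_t base_vertex,
                            std::uint32_t instance_count, std::uint32_t base_instance) {
    const VkMultiDrawIndexedInfoEXT info{
        .firstIndex = first_index,
        .indexCount = index_count,
        .vertexOffset = base_vertex, // used because pVertexOffset below is null
    };
    // A non-null pVertexOffset would override the per-draw vertexOffset field,
    // which is what the diff above does by passing &vertex_offset.
    draw_multi_indexed(cmdbuf, 1, &info, instance_count, base_instance,
                       sizeof(VkMultiDrawIndexedInfoEXT), nullptr);
}
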
@@ -386,13 +419,48 @@ void RasterizerVulkan::Clear(u32 layer_count) {
.baseArrayLayer = regs.clear_surface.layer,
.layerCount = layer_count,
};
if (clear_rect.rect.extent.width == 0 || clear_rect.rect.extent.height == 0) {
const auto clamp_rect_to_render_area = [render_area](VkRect2D& rect) -> bool {
const auto clamp_axis = [](s32& offset, u32& extent, u32 limit) {
auto clamp_offset = [&offset, limit]() {
if (limit == 0) {
offset = 0;
return;
}
offset = std::clamp(offset, 0, static_cast<s32>(limit));
};
if (extent == 0) {
clamp_offset();
return;
}
if (offset < 0) {
const u32 shrink = (std::min)(extent, static_cast<u32>(-offset));
extent -= shrink;
offset = 0;
}
if (limit == 0) {
extent = 0;
offset = 0;
return;
}
if (offset >= static_cast<s32>(limit)) {
offset = static_cast<s32>(limit);
extent = 0;
return;
}
const u64 end_coord = static_cast<u64>(offset) + extent;
if (end_coord > limit) {
extent = limit - static_cast<u32>(offset);
}
};
clamp_axis(rect.offset.x, rect.extent.width, render_area.width);
clamp_axis(rect.offset.y, rect.extent.height, render_area.height);
return rect.extent.width != 0 && rect.extent.height != 0;
};
if (!clamp_rect_to_render_area(clear_rect.rect)) {
return;
}
clear_rect.rect.extent = VkExtent2D{
.width = (std::min)(clear_rect.rect.extent.width, render_area.width),
.height = (std::min)(clear_rect.rect.extent.height, render_area.height),
};
const u32 color_attachment = regs.clear_surface.RT;
if (use_color && framebuffer->HasAspectColorBit(color_attachment)) {
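
Aside (not part of the diff): the per-axis clamp introduced above, exercised on its own. This standalone version keeps the same shrink-then-clamp behaviour but uses plain integer types instead of the emulator's, so treat it as an illustration.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Shrinks extent so that [offset, offset + extent) stays inside [0, limit).
void ClampAxis(std::int32_t& offset, std::uint32_t& extent, std::uint32_t limit) {
    if (limit == 0) {
        offset = 0;
        extent = 0;
        return;
    }
    if (offset < 0) {
        const std::uint32_t shrink = (std::min)(extent, static_cast<std::uint32_t>(-offset));
        extent -= shrink;
        offset = 0;
    }
    if (offset >= static_cast<std::int32_t>(limit)) {
        offset = static_cast<std::int32_t>(limit);
        extent = 0;
        return;
    }
    const std::uint64_t end = static_cast<std::uint64_t>(offset) + extent;
    if (end > limit) {
        extent = limit - static_cast<std::uint32_t>(offset);
    }
}

int main() {
    std::int32_t x = -16;
    std::uint32_t width = 64;
    ClampAxis(x, width, 32);                          // clear rect starts left of the render area
    std::printf("offset=%d extent=%u\n", x, width);   // offset=0 extent=32
}
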
@@ -839,23 +907,21 @@ void RasterizerVulkan::LoadDiskResources(u64 title_id, std::stop_token stop_load
void RasterizerVulkan::FlushWork() {
#ifdef ANDROID
static constexpr u32 DRAWS_TO_DISPATCH = 1024;
static constexpr u32 DRAWS_TO_DISPATCH = 512;
static constexpr u32 CHECK_MASK = 3;
#else
static constexpr u32 DRAWS_TO_DISPATCH = 4096;
static constexpr u32 CHECK_MASK = 7;
#endif // ANDROID
// Only check every CHECK_MASK + 1 draws
static_assert(DRAWS_TO_DISPATCH % 8 == 0);
if ((++draw_counter & 7) != 7) {
static_assert(DRAWS_TO_DISPATCH % (CHECK_MASK + 1) == 0);
if ((++draw_counter & CHECK_MASK) != CHECK_MASK) {
return;
}
if (draw_counter < DRAWS_TO_DISPATCH) {
// Send recorded tasks to the worker thread
scheduler.DispatchWork();
return;
}
// Otherwise (every DRAWS_TO_DISPATCH draws) flush execution.
// This submits the recorded commands to the Vulkan driver.
scheduler.Flush();
draw_counter = 0;
}
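
Aside (not part of the diff): an illustrative model of the FlushWork cadence above, using the Android constants from the change; Action and Step are made-up names, not the rasterizer's API.

#include <cstdint>
#include <cstdio>

constexpr std::uint32_t DRAWS_TO_DISPATCH = 512;
constexpr std::uint32_t CHECK_MASK = 3;

enum class Action { None, Dispatch, Flush };

Action Step(std::uint32_t& draw_counter) {
    if ((++draw_counter & CHECK_MASK) != CHECK_MASK) {
        return Action::None;      // only act every CHECK_MASK + 1 draws
    }
    if (draw_counter < DRAWS_TO_DISPATCH) {
        return Action::Dispatch;  // hand recorded work to the worker thread
    }
    draw_counter = 0;             // submit to the driver and restart the window
    return Action::Flush;
}

int main() {
    std::uint32_t counter = 0;
    std::uint32_t flushes = 0;
    for (int i = 0; i < 2048; ++i) {
        if (Step(counter) == Action::Flush) {
            ++flushes;
        }
    }
    std::printf("flushes=%u\n", flushes); // 3 flushes over 2048 draws
}
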
@@ -921,6 +987,8 @@ bool AccelerateDMA::BufferToImage(const Tegra::DMA::ImageCopy& copy_info,
void RasterizerVulkan::UpdateDynamicStates() {
auto& regs = maxwell3d->regs;
// Core Dynamic States (Vulkan 1.0) - Always active regardless of dyna_state setting
UpdateViewportsState(regs);
UpdateScissorsState(regs);
UpdateDepthBias(regs);
@@ -928,6 +996,8 @@ void RasterizerVulkan::UpdateDynamicStates() {
UpdateDepthBounds(regs);
UpdateStencilFaces(regs);
UpdateLineWidth(regs);
// EDS1: CullMode, DepthCompare, FrontFace, StencilOp, DepthBoundsTest, DepthTest, DepthWrite, StencilTest
if (device.IsExtExtendedDynamicStateSupported()) {
UpdateCullMode(regs);
UpdateDepthCompareOp(regs);
@@ -938,40 +1008,52 @@ void RasterizerVulkan::UpdateDynamicStates() {
UpdateDepthTestEnable(regs);
UpdateDepthWriteEnable(regs);
UpdateStencilTestEnable(regs);
if (device.IsExtExtendedDynamicState2Supported()) {
UpdatePrimitiveRestartEnable(regs);
UpdateRasterizerDiscardEnable(regs);
UpdateDepthBiasEnable(regs);
}
if (device.IsExtExtendedDynamicState3EnablesSupported()) {
using namespace Tegra::Engines;
if (device.GetDriverID() == VkDriverIdKHR::VK_DRIVER_ID_AMD_OPEN_SOURCE || device.GetDriverID() == VkDriverIdKHR::VK_DRIVER_ID_AMD_PROPRIETARY) {
const auto has_float = std::any_of(
regs.vertex_attrib_format.begin(),
regs.vertex_attrib_format.end(),
[](const auto& attrib) {
return attrib.type == Maxwell3D::Regs::VertexAttribute::Type::Float;
}
);
if (regs.logic_op.enable) {
regs.logic_op.enable = static_cast<u32>(!has_float);
}
}
UpdateLogicOpEnable(regs);
UpdateDepthClampEnable(regs);
}
}
if (device.IsExtExtendedDynamicState2ExtrasSupported()) {
UpdateLogicOp(regs);
}
if (device.IsExtExtendedDynamicState3BlendingSupported()) {
UpdateBlending(regs);
}
if (device.IsExtExtendedDynamicState3EnablesSupported()) {
UpdateLineStippleEnable(regs);
UpdateConservativeRasterizationMode(regs);
}
}
// EDS2: PrimitiveRestart, RasterizerDiscard, DepthBias enable/disable
if (device.IsExtExtendedDynamicState2Supported()) {
UpdatePrimitiveRestartEnable(regs);
UpdateRasterizerDiscardEnable(regs);
UpdateDepthBiasEnable(regs);
}
// EDS2 Extras: LogicOp operation selection
if (device.IsExtExtendedDynamicState2ExtrasSupported()) {
UpdateLogicOp(regs);
}
// EDS3 Enables: LogicOpEnable, DepthClamp, LineStipple, ConservativeRaster
if (device.IsExtExtendedDynamicState3EnablesSupported()) {
using namespace Tegra::Engines;
// AMD Workaround: LogicOp incompatible with float render targets
if (device.GetDriverID() == VkDriverIdKHR::VK_DRIVER_ID_AMD_OPEN_SOURCE ||
device.GetDriverID() == VkDriverIdKHR::VK_DRIVER_ID_AMD_PROPRIETARY) {
const auto has_float = std::any_of(
regs.vertex_attrib_format.begin(), regs.vertex_attrib_format.end(),
[](const auto& attrib) {
return attrib.type == Maxwell3D::Regs::VertexAttribute::Type::Float;
}
);
if (regs.logic_op.enable) {
regs.logic_op.enable = static_cast<u32>(!has_float);
}
}
UpdateLogicOpEnable(regs);
UpdateDepthClampEnable(regs);
UpdateLineRasterizationMode(regs);
UpdateLineStippleEnable(regs);
UpdateConservativeRasterizationMode(regs);
UpdateAlphaToCoverageEnable(regs);
UpdateAlphaToOneEnable(regs);
}
// EDS3 Blending: ColorBlendEnable, ColorBlendEquation, ColorWriteMask
if (device.IsExtExtendedDynamicState3BlendingSupported()) {
UpdateBlending(regs);
}
// Vertex Input Dynamic State: Independent from EDS levels
if (device.IsExtVertexInputDynamicStateSupported()) {
if (auto* gp = pipeline_cache.CurrentGraphicsPipeline(); gp && gp->HasDynamicVertexInput()) {
UpdateVertexInput(regs);
@@ -984,9 +1066,16 @@ void RasterizerVulkan::HandleTransformFeedback() {
const auto& regs = maxwell3d->regs;
if (!device.IsExtTransformFeedbackSupported()) {
std::call_once(warn_unsupported, [&] {
LOG_ERROR(Render_Vulkan, "Transform feedbacks used but not supported");
});
// If the guest enabled transform feedback, warn once that the device lacks support.
if (regs.transform_feedback_enabled != 0) {
std::call_once(warn_unsupported, [&] {
LOG_WARNING(Render_Vulkan, "Transform feedback requested by guest but VK_EXT_transform_feedback is unavailable; queries disabled");
});
} else {
std::call_once(warn_unsupported, [&] {
LOG_INFO(Render_Vulkan, "VK_EXT_transform_feedback not available on device");
});
}
return;
}
query_cache.CounterEnable(VideoCommon::QueryType::StreamingByteCount,
@@ -995,7 +1084,7 @@ void RasterizerVulkan::HandleTransformFeedback() {
UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) ||
regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation));
}
}
}
void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchViewports()) {
@@ -1144,6 +1233,9 @@ void RasterizerVulkan::UpdateBlendConstants(Tegra::Engines::Maxwell3D::Regs& reg
if (!state_tracker.TouchBlendConstants()) {
return;
}
if (!device.UsesAdvancedCoreDynamicState()) {
return;
}
const std::array blend_color = {regs.blend_color.r, regs.blend_color.g, regs.blend_color.b,
regs.blend_color.a};
scheduler.Record(
@@ -1154,6 +1246,9 @@ void RasterizerVulkan::UpdateDepthBounds(Tegra::Engines::Maxwell3D::Regs& regs)
if (!state_tracker.TouchDepthBounds()) {
return;
}
if (!device.UsesAdvancedCoreDynamicState() || !device.IsDepthBoundsSupported()) {
return;
}
scheduler.Record([min = regs.depth_bounds[0], max = regs.depth_bounds[1]](
vk::CommandBuffer cmdbuf) { cmdbuf.SetDepthBounds(min, max); });
}
@@ -1162,6 +1257,10 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
if (!state_tracker.TouchStencilProperties()) {
return;
}
if (!device.UsesAdvancedCoreDynamicState()) {
state_tracker.ClearStencilReset();
return;
}
bool update_references = state_tracker.TouchStencilReference();
bool update_write_mask = state_tracker.TouchStencilWriteMask();
bool update_compare_masks = state_tracker.TouchStencilCompare();
@@ -1324,6 +1423,10 @@ void RasterizerVulkan::UpdateConservativeRasterizationMode(Tegra::Engines::Maxwe
return;
}
if (!device.SupportsDynamicState3ConservativeRasterizationMode()) {
return;
}
scheduler.Record([enable = regs.conservative_raster_enable](vk::CommandBuffer cmdbuf) {
cmdbuf.SetConservativeRasterizationModeEXT(
enable ? VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT
@@ -1336,23 +1439,50 @@ void RasterizerVulkan::UpdateLineStippleEnable(Tegra::Engines::Maxwell3D::Regs&
return;
}
if (!device.SupportsDynamicState3LineStippleEnable()) {
return;
}
scheduler.Record([enable = regs.line_stipple_enable](vk::CommandBuffer cmdbuf) {
cmdbuf.SetLineStippleEnableEXT(enable);
});
}
void RasterizerVulkan::UpdateLineRasterizationMode(Tegra::Engines::Maxwell3D::Regs& regs) {
// if (!state_tracker.TouchLi()) {
// return;
// }
if (!device.IsExtLineRasterizationSupported()) {
return;
}
if (!state_tracker.TouchLineRasterizationMode()) {
return;
}
// TODO: The maxwell emulator does not capture line rasters
if (!device.SupportsDynamicState3LineRasterizationMode()) {
static std::once_flag warn_missing_rect;
std::call_once(warn_missing_rect, [] {
LOG_WARNING(Render_Vulkan,
"Driver lacks rectangular line rasterization support; skipping dynamic "
"line state updates");
});
return;
}
// scheduler.Record([enable = regs.line](vk::CommandBuffer cmdbuf) {
// cmdbuf.SetConservativeRasterizationModeEXT(
// enable ? VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT
// : VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT);
// });
const bool wants_smooth = regs.line_anti_alias_enable != 0;
VkLineRasterizationModeEXT mode = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT;
if (wants_smooth) {
if (device.SupportsSmoothLines()) {
mode = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
} else {
static std::once_flag warn_missing_smooth;
std::call_once(warn_missing_smooth, [] {
LOG_WARNING(Render_Vulkan,
"Line anti-aliasing requested but smoothLines feature unavailable; "
"using rectangular rasterization");
});
}
}
scheduler.Record([mode](vk::CommandBuffer cmdbuf) {
cmdbuf.SetLineRasterizationModeEXT(mode);
});
}
void RasterizerVulkan::UpdateDepthBiasEnable(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1394,6 +1524,9 @@ void RasterizerVulkan::UpdateLogicOpEnable(Tegra::Engines::Maxwell3D::Regs& regs
if (!state_tracker.TouchLogicOpEnable()) {
return;
}
if (!device.SupportsDynamicState3LogicOpEnable()) {
return;
}
scheduler.Record([enable = regs.logic_op.enable](vk::CommandBuffer cmdbuf) {
cmdbuf.SetLogicOpEnableEXT(enable != 0);
});
@@ -1403,6 +1536,9 @@ void RasterizerVulkan::UpdateDepthClampEnable(Tegra::Engines::Maxwell3D::Regs& r
if (!state_tracker.TouchDepthClampEnable()) {
return;
}
if (!device.SupportsDynamicState3DepthClampEnable()) {
return;
}
bool is_enabled = !(regs.viewport_clip_control.geometry_clip ==
Maxwell::ViewportClipControl::GeometryClip::Passthrough ||
regs.viewport_clip_control.geometry_clip ==
@@ -1413,6 +1549,41 @@ void RasterizerVulkan::UpdateDepthClampEnable(Tegra::Engines::Maxwell3D::Regs& r
[is_enabled](vk::CommandBuffer cmdbuf) { cmdbuf.SetDepthClampEnableEXT(is_enabled); });
}
void RasterizerVulkan::UpdateAlphaToCoverageEnable(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchAlphaToCoverageEnable()) {
return;
}
if (!device.SupportsDynamicState3AlphaToCoverageEnable()) {
return;
}
GraphicsPipeline* const pipeline = pipeline_cache.CurrentGraphicsPipeline();
const bool enable = pipeline != nullptr && pipeline->SupportsAlphaToCoverage() &&
regs.anti_alias_alpha_control.alpha_to_coverage != 0;
scheduler.Record([enable](vk::CommandBuffer cmdbuf) {
cmdbuf.SetAlphaToCoverageEnableEXT(enable ? VK_TRUE : VK_FALSE);
});
}
void RasterizerVulkan::UpdateAlphaToOneEnable(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchAlphaToOneEnable()) {
return;
}
if (!device.SupportsDynamicState3AlphaToOneEnable()) {
static std::once_flag warn_alpha_to_one;
std::call_once(warn_alpha_to_one, [] {
LOG_WARNING(Render_Vulkan,
"Alpha-to-one is not supported on this device; forcing it disabled");
});
return;
}
GraphicsPipeline* const pipeline = pipeline_cache.CurrentGraphicsPipeline();
const bool enable = pipeline != nullptr && pipeline->SupportsAlphaToOne() &&
regs.anti_alias_alpha_control.alpha_to_one != 0;
scheduler.Record([enable](vk::CommandBuffer cmdbuf) {
cmdbuf.SetAlphaToOneEnableEXT(enable ? VK_TRUE : VK_FALSE);
});
}
void RasterizerVulkan::UpdateDepthCompareOp(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchDepthCompareOp()) {
return;

View File

@@ -183,6 +183,8 @@ private:
void UpdateDepthBiasEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateLogicOpEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthClampEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateAlphaToCoverageEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateAlphaToOneEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateFrontFace(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateStencilOp(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs& regs);

View File

@@ -13,6 +13,7 @@
#include "common/thread.h"
#include "video_core/renderer_vulkan/vk_command_pool.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_master_semaphore.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_state_tracker.h"
@@ -130,9 +131,27 @@ void Scheduler::RequestOutsideRenderPassOperationContext() {
bool Scheduler::UpdateGraphicsPipeline(GraphicsPipeline* pipeline) {
if (state.graphics_pipeline == pipeline) {
if (pipeline && pipeline->UsesExtendedDynamicState() &&
state.needs_state_enable_refresh) {
state_tracker.InvalidateStateEnableFlag();
state.needs_state_enable_refresh = false;
}
return false;
}
state.graphics_pipeline = pipeline;
if (!pipeline) {
return true;
}
if (!pipeline->UsesExtendedDynamicState()) {
state.needs_state_enable_refresh = true;
} else if (state.needs_state_enable_refresh) {
state_tracker.InvalidateStateEnableFlag();
state.needs_state_enable_refresh = false;
}
return true;
}
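
Aside (not part of the diff): an illustrative model of the refresh flag handled above. When a pipeline that does not use extended dynamic state has been bound, the next EDS pipeline should re-emit its state-enable commands exactly once; the BindTracker name and OnBind return value are invented for the sketch.

#include <cstdio>

struct BindTracker {
    bool needs_refresh = false;

    // Returns true when the caller should invalidate the state-enable flags.
    bool OnBind(bool pipeline_uses_eds) {
        if (!pipeline_uses_eds) {
            needs_refresh = true;    // static pipeline may have clobbered EDS state
            return false;
        }
        if (needs_refresh) {
            needs_refresh = false;   // EDS pipeline bound again: refresh once
            return true;
        }
        return false;
    }
};

int main() {
    BindTracker tracker;
    std::printf("%d\n", tracker.OnBind(true));   // 0: nothing to refresh yet
    std::printf("%d\n", tracker.OnBind(false));  // 0: static pipeline, mark refresh
    std::printf("%d\n", tracker.OnBind(true));   // 1: refresh EDS enables once
}
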
@@ -277,6 +296,7 @@ void Scheduler::EndRenderPass()
}
query_cache->CounterEnable(VideoCommon::QueryType::ZPassPixelCount64, false);
query_cache->CounterEnable(VideoCommon::QueryType::StreamingByteCount, false);
query_cache->NotifySegment(false);
Record([num_images = num_renderpass_images,

View File

@@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -214,6 +217,7 @@ private:
GraphicsPipeline* graphics_pipeline = nullptr;
bool is_rescaling = false;
bool rescaling_defined = false;
bool needs_state_enable_refresh = false;
};
void WorkerThread(std::stop_token stop_token);

View File

@@ -48,6 +48,7 @@ Flags MakeInvalidationFlags() {
FrontFace,
StencilOp,
StencilTestEnable,
RasterizerDiscardEnable,
VertexBuffers,
VertexInput,
StateEnable,
@@ -55,6 +56,9 @@ Flags MakeInvalidationFlags() {
DepthBiasEnable,
LogicOpEnable,
DepthClampEnable,
AlphaToCoverageEnable,
AlphaToOneEnable,
LineRasterizationMode,
LogicOp,
Blending,
ColorMask,
@@ -148,6 +152,8 @@ void SetupDirtyStateEnable(Tables& tables) {
setup(OFF(logic_op.enable), LogicOpEnable);
setup(OFF(viewport_clip_control.geometry_clip), DepthClampEnable);
setup(OFF(line_stipple_enable), LineStippleEnable);
setup(OFF(anti_alias_alpha_control.alpha_to_coverage), AlphaToCoverageEnable);
setup(OFF(anti_alias_alpha_control.alpha_to_one), AlphaToOneEnable);
}
void SetupDirtyDepthCompareOp(Tables& tables) {
@@ -226,6 +232,7 @@ void SetupRasterModes(Tables &tables) {
table[OFF(line_stipple_params)] = LineStippleParams;
table[OFF(conservative_raster_enable)] = ConservativeRasterizationMode;
table[OFF(line_anti_alias_enable)] = LineRasterizationMode;
}
} // Anonymous namespace

View File

@@ -54,6 +54,7 @@ enum : u8 {
PrimitiveRestartEnable,
RasterizerDiscardEnable,
ConservativeRasterizationMode,
LineRasterizationMode,
LineStippleEnable,
LineStippleParams,
DepthBiasEnable,
@@ -61,6 +62,8 @@ enum : u8 {
LogicOp,
LogicOpEnable,
DepthClampEnable,
AlphaToCoverageEnable,
AlphaToOneEnable,
Blending,
BlendEnable,
@@ -94,6 +97,10 @@ public:
(*flags)[Dirty::Scissors] = true;
}
void InvalidateStateEnableFlag() {
(*flags)[Dirty::StateEnable] = true;
}
bool TouchViewports() {
const bool dirty_viewports = Exchange(Dirty::Viewports, false);
const bool rescale_viewports = Exchange(VideoCommon::Dirty::RescaleViewports, false);
@@ -225,6 +232,14 @@ public:
return Exchange(Dirty::DepthClampEnable, false);
}
bool TouchAlphaToCoverageEnable() {
return Exchange(Dirty::AlphaToCoverageEnable, false);
}
bool TouchAlphaToOneEnable() {
return Exchange(Dirty::AlphaToOneEnable, false);
}
bool TouchDepthCompareOp() {
return Exchange(Dirty::DepthCompareOp, false);
}
@@ -261,6 +276,10 @@ public:
return Exchange(Dirty::LogicOp, false);
}
bool TouchLineRasterizationMode() {
return Exchange(Dirty::LineRasterizationMode, false);
}
bool ChangePrimitiveTopology(Maxwell::PrimitiveTopology new_topology) {
const bool has_changed = current_topology != new_topology;
current_topology = new_topology;

View File

@@ -306,7 +306,17 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities) {
swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
swapchain_ci.pQueueFamilyIndices = queue_indices.data();
}
static constexpr std::array view_formats{VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_B8G8R8A8_SRGB};
// According to Vulkan spec, when using VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR,
// the base format (imageFormat) MUST be included in pViewFormats
const std::array view_formats{
swapchain_ci.imageFormat, // Base format must be included in the list
VK_FORMAT_B8G8R8A8_UNORM,
VK_FORMAT_B8G8R8A8_SRGB,
#ifdef ANDROID
VK_FORMAT_R8G8B8A8_UNORM, // Android may use RGBA
VK_FORMAT_R8G8B8A8_SRGB,
#endif
};
VkImageFormatListCreateInfo format_list{
.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR,
.pNext = nullptr,

View File
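
Aside (not part of the diff): one way to guarantee that the base imageFormat ends up in pViewFormats when VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR is used. The diff above builds a fixed std::array instead and simply lists the base format at the front; this helper is illustrative and assumes the Vulkan headers are available.

#include <algorithm>
#include <cstdio>
#include <vector>
#include <vulkan/vulkan_core.h>

// Builds a pViewFormats list that always contains the swapchain's base format.
std::vector<VkFormat> BuildViewFormats(VkFormat base_format,
                                       std::vector<VkFormat> extra_formats) {
    if (std::find(extra_formats.begin(), extra_formats.end(), base_format) ==
        extra_formats.end()) {
        extra_formats.insert(extra_formats.begin(), base_format);
    }
    return extra_formats;
}

int main() {
    const auto formats =
        BuildViewFormats(VK_FORMAT_B8G8R8A8_UNORM, {VK_FORMAT_B8G8R8A8_SRGB});
    std::printf("view formats: %zu\n", formats.size()); // 2: base + sRGB view
}
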

@@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -101,7 +104,7 @@ public:
}
VkSemaphore CurrentRenderSemaphore() const {
return *render_semaphores[frame_index];
return *render_semaphores[image_index];
}
u32 GetWidth() const {

View File

@@ -1104,6 +1104,8 @@ void TextureCacheRuntime::BlitImage(Framebuffer* dst_framebuffer, ImageView& dst
UNREACHABLE();
}
}();
// Use shader-based depth/stencil blits if hardware doesn't support the format
// Note: MSAA resolves (MSAA->single) use vkCmdResolveImage which works fine
if (!can_blit_depth_stencil) {
UNIMPLEMENTED_IF(is_src_msaa || is_dst_msaa);
blit_image_helper.BlitDepthStencil(dst_framebuffer, src, dst_region, src_region,
@@ -1118,6 +1120,15 @@ void TextureCacheRuntime::BlitImage(Framebuffer* dst_framebuffer, ImageView& dst
const VkImage src_image = src.ImageHandle();
const VkImageSubresourceLayers dst_layers = MakeSubresourceLayers(&dst);
const VkImageSubresourceLayers src_layers = MakeSubresourceLayers(&src);
const bool is_msaa_to_msaa = is_src_msaa && is_dst_msaa;
// NVIDIA 510+ and Intel crash on MSAA->MSAA blits (scaling operations)
// Fall back to 3D helpers for MSAA scaling
if (is_msaa_to_msaa && device.CantBlitMSAA()) {
// This case should be handled by NeedsScaleHelper(), which routes it to the 3D helpers instead
UNIMPLEMENTED_MSG("MSAA to MSAA blit not supported on this driver");
return;
}
const bool is_resolve = is_src_msaa && !is_dst_msaa;
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([filter, dst_region, src_region, dst_image, src_image, dst_layers, src_layers,
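The comments above describe four possible paths; a small standalone sketch of that selection logic (the enum and helper are illustrative, not renderer code):

// Blit-path selection as described by the comments in BlitImage.
enum class BlitPath {
    ShaderDepthStencil, // blit_image_helper fallback for unsupported depth/stencil blits
    ScaleHelper3D,      // MSAA->MSAA on drivers that cannot blit MSAA
    Resolve,            // MSAA->single sample via vkCmdResolveImage
    HardwareBlit,       // plain vkCmdBlitImage
};

BlitPath SelectBlitPath(bool is_depth_stencil, bool can_blit_depth_stencil,
                        bool is_src_msaa, bool is_dst_msaa, bool cant_blit_msaa) {
    if (is_depth_stencil && !can_blit_depth_stencil) {
        return BlitPath::ShaderDepthStencil;
    }
    if (is_src_msaa && is_dst_msaa && cant_blit_msaa) {
        return BlitPath::ScaleHelper3D;
    }
    if (is_src_msaa && !is_dst_msaa) {
        return BlitPath::Resolve;
    }
    return BlitPath::HardwareBlit;
}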
@@ -2222,18 +2233,26 @@ vk::ImageView ImageView::MakeView(VkFormat vk_format, VkImageAspectFlags aspect_
Sampler::Sampler(TextureCacheRuntime& runtime, const Tegra::Texture::TSCEntry& tsc) {
const auto& device = runtime.device;
const bool arbitrary_borders = runtime.device.IsExtCustomBorderColorSupported();
// Check if custom border colors are supported
const bool has_custom_border_colors = runtime.device.IsCustomBorderColorsSupported();
const bool has_format_undefined = runtime.device.IsCustomBorderColorWithoutFormatSupported();
const auto color = tsc.BorderColor();
// Determine border format based on available features:
// - If customBorderColorWithoutFormat is available: use VK_FORMAT_UNDEFINED (most flexible)
// - If only customBorderColors is available: use concrete format (R8G8B8A8_UNORM)
// - If neither is available: use standard border colors (handled by ConvertBorderColor)
const VkFormat border_format = has_format_undefined ? VK_FORMAT_UNDEFINED
: VK_FORMAT_R8G8B8A8_UNORM;
const VkSamplerCustomBorderColorCreateInfoEXT border_ci{
.sType = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT,
.pNext = nullptr,
// TODO: Make use of std::bit_cast once libc++ supports it.
.customBorderColor = std::bit_cast<VkClearColorValue>(color),
.format = VK_FORMAT_UNDEFINED,
.format = border_format,
};
const void* pnext = nullptr;
if (arbitrary_borders) {
if (has_custom_border_colors) {
pnext = &border_ci;
}
const VkSamplerReductionModeCreateInfoEXT reduction_ci{
@@ -2267,8 +2286,8 @@ Sampler::Sampler(TextureCacheRuntime& runtime, const Tegra::Texture::TSCEntry& t
.compareOp = MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func),
.minLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.0f : tsc.MinLod(),
.maxLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.25f : tsc.MaxLod(),
.borderColor =
arbitrary_borders ? VK_BORDER_COLOR_FLOAT_CUSTOM_EXT : ConvertBorderColor(color),
.borderColor = has_custom_border_colors ? VK_BORDER_COLOR_FLOAT_CUSTOM_EXT
: ConvertBorderColor(color),
.unnormalizedCoordinates = VK_FALSE,
});
};

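The three-way decision in the sampler constructor can be summarized in isolation; a minimal sketch (standalone helper, not the renderer's code) of the same decision tree:

#include <vulkan/vulkan.h>

struct BorderColorChoice {
    bool use_custom;        // Chain VkSamplerCustomBorderColorCreateInfoEXT?
    VkFormat custom_format; // Format to place in that struct when chained.
    VkBorderColor border;   // Value for VkSamplerCreateInfo::borderColor.
};

BorderColorChoice ChooseBorderColor(bool has_custom_border_colors,
                                    bool has_custom_border_color_without_format,
                                    VkBorderColor standard_fallback) {
    if (!has_custom_border_colors) {
        // Neither feature: use one of the standard border colors.
        return {false, VK_FORMAT_UNDEFINED, standard_fallback};
    }
    // customBorderColorWithoutFormat allows VK_FORMAT_UNDEFINED; otherwise a
    // concrete format compatible with the sampled image is required.
    const VkFormat format = has_custom_border_color_without_format ? VK_FORMAT_UNDEFINED
                                                                   : VK_FORMAT_R8G8B8A8_UNORM;
    return {true, format, VK_BORDER_COLOR_FLOAT_CUSTOM_EXT};
}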
View File

@@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
@@ -241,7 +244,6 @@ public:
[[nodiscard]] VkImageView Handle(Shader::TextureType texture_type) const noexcept {
return *image_views[static_cast<size_t>(texture_type)];
}
[[nodiscard]] VkImage ImageHandle() const noexcept {
return image_handle;
}

View File

@@ -277,10 +277,7 @@ std::optional<u64> GenericEnvironment::TryFindSize() {
Tegra::Texture::TICEntry GenericEnvironment::ReadTextureInfo(GPUVAddr tic_addr, u32 tic_limit,
bool via_header_index, u32 raw) {
const auto handle{Tegra::Texture::TexturePair(raw, via_header_index)};
if (handle.first > tic_limit) {
LOG_WARNING(Shader, "Texture ID {} is out of bounds (limit {})", handle.first, tic_limit);
return {};
}
ASSERT(handle.first <= tic_limit);
const GPUVAddr descriptor_addr{tic_addr + handle.first * sizeof(Tegra::Texture::TICEntry)};
Tegra::Texture::TICEntry entry;
gpu_memory->ReadBlock(descriptor_addr, &entry, sizeof(entry));

View File

@@ -33,6 +33,7 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
dma_downloaded = forced_flushed;
format = PixelFormatFromTextureInfo(config.format, config.r_type, config.g_type, config.b_type,
config.a_type, config.srgb_conversion);
num_samples = NumSamples(config.msaa_mode);
resources.levels = config.max_mip_level + 1;
if (config.IsPitchLinear()) {

View File

@@ -22,6 +22,17 @@
#include <vulkan/vulkan.h>
// Define maintenance 7-9 extension names for Vulkan headers that do not yet provide them
#ifndef VK_KHR_MAINTENANCE_7_EXTENSION_NAME
#define VK_KHR_MAINTENANCE_7_EXTENSION_NAME "VK_KHR_maintenance7"
#endif
#ifndef VK_KHR_MAINTENANCE_8_EXTENSION_NAME
#define VK_KHR_MAINTENANCE_8_EXTENSION_NAME "VK_KHR_maintenance8"
#endif
#ifndef VK_KHR_MAINTENANCE_9_EXTENSION_NAME
#define VK_KHR_MAINTENANCE_9_EXTENSION_NAME "VK_KHR_maintenance9"
#endif
// Sanitize macros
#undef CreateEvent
#undef CreateSemaphore

View File

@@ -292,9 +292,10 @@ std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(vk::Physica
#if defined(ANDROID) && defined(ARCHITECTURE_arm64)
void OverrideBcnFormats(std::unordered_map<VkFormat, VkFormatProperties>& format_properties) {
// These properties are extracted from Adreno driver 512.687.0
constexpr VkFormatFeatureFlags tiling_features{
VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_BLIT_SRC_BIT |
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT | VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
constexpr VkFormatFeatureFlags tiling_features{VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
VK_FORMAT_FEATURE_BLIT_SRC_BIT |
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT |
VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
VK_FORMAT_FEATURE_TRANSFER_DST_BIT};
constexpr VkFormatFeatureFlags buffer_features{VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT};
@@ -416,7 +417,6 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
const bool is_suitable = GetSuitability(surface != nullptr);
const VkDriverId driver_id = properties.driver.driverID;
const auto device_id = properties.properties.deviceID;
const bool is_radv = driver_id == VK_DRIVER_ID_MESA_RADV;
const bool is_amd_driver =
driver_id == VK_DRIVER_ID_AMD_PROPRIETARY || driver_id == VK_DRIVER_ID_AMD_OPEN_SOURCE;
@@ -427,11 +427,13 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
const bool is_mvk = driver_id == VK_DRIVER_ID_MOLTENVK;
const bool is_qualcomm = driver_id == VK_DRIVER_ID_QUALCOMM_PROPRIETARY;
const bool is_turnip = driver_id == VK_DRIVER_ID_MESA_TURNIP;
const bool is_s8gen2 = device_id == 0x43050a01;
//const bool is_arm = driver_id == VK_DRIVER_ID_ARM_PROPRIETARY;
const bool is_arm = driver_id == VK_DRIVER_ID_ARM_PROPRIETARY;
if (!is_suitable)
LOG_WARNING(Render_Vulkan, "Unsuitable driver - continuing anyways");
if ((is_mvk || is_qualcomm || is_turnip || is_arm) && !is_suitable) {
LOG_WARNING(Render_Vulkan, "Unsuitable driver, continuing anyway");
} else if (!is_suitable) {
throw vk::Exception(VK_ERROR_INCOMPATIBLE_DRIVER);
}
if (is_nvidia) {
nvidia_arch = GetNvidiaArchitecture(physical, supported_extensions);
@@ -492,6 +494,11 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
CollectToolingInfo();
if (is_qualcomm) {
// Qualcomm Adreno GPUs don't handle scaled vertex attributes; keep emulation enabled
must_emulate_scaled_formats = true;
LOG_WARNING(Render_Vulkan,
"Qualcomm drivers require scaled vertex format emulation; forcing fallback");
LOG_WARNING(Render_Vulkan,
"Disabling shader float controls and 64-bit integer features on Qualcomm proprietary drivers");
RemoveExtension(extensions.shader_float_controls, VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME);
@@ -533,35 +540,31 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
}
if (nv_major_version >= 510) {
LOG_WARNING(Render_Vulkan, "NVIDIA Drivers >= 510 do not support MSAA image blits");
LOG_WARNING(Render_Vulkan,
"NVIDIA Drivers >= 510 do not support MSAA->MSAA image blits. "
"MSAA scaling will use 3D helpers. MSAA resolves work normally.");
cant_blit_msaa = true;
}
}
if (extensions.extended_dynamic_state3 && is_radv) {
LOG_WARNING(Render_Vulkan, "RADV has broken extendedDynamicState3ColorBlendEquation");
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEnable = false;
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEquation = false;
dynamic_state3_blending = false;
const u32 version = (properties.properties.driverVersion << 3) >> 3;
if (version < VK_MAKE_API_VERSION(0, 23, 1, 0)) {
LOG_WARNING(Render_Vulkan,
"RADV versions older than 23.1.0 have broken depth clamp dynamic state");
features.extended_dynamic_state3.extendedDynamicState3DepthClampEnable = false;
dynamic_state3_enables = false;
// Mali/NVIDIA proprietary drivers: shader stencil export not supported
// Use hardware depth/stencil blits instead when available
if (!extensions.shader_stencil_export) {
LOG_INFO(Render_Vulkan,
"NVIDIA: VK_EXT_shader_stencil_export not supported, using hardware blits "
"for depth/stencil operations");
LOG_INFO(Render_Vulkan, " D24S8 hardware blit support: {}",
is_blit_depth24_stencil8_supported);
LOG_INFO(Render_Vulkan, " D32S8 hardware blit support: {}",
is_blit_depth32_stencil8_supported);
if (!is_blit_depth24_stencil8_supported && !is_blit_depth32_stencil8_supported) {
LOG_WARNING(Render_Vulkan,
"NVIDIA: Neither shader export nor hardware blits available for "
"depth/stencil. Performance may be degraded.");
}
}
}
if (extensions.extended_dynamic_state3 && (is_amd_driver || driver_id == VK_DRIVER_ID_SAMSUNG_PROPRIETARY)) {
// AMD and Samsung drivers have broken extendedDynamicState3ColorBlendEquation
LOG_WARNING(Render_Vulkan,
"AMD and Samsung drivers have broken extendedDynamicState3ColorBlendEquation");
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEnable = false;
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEquation = false;
dynamic_state3_blending = false;
}
sets_per_pool = 64;
if (is_amd_driver) {
// AMD drivers need a higher amount of Sets per Pool in certain circumstances like in XC2.
@@ -598,15 +601,27 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
}
if (is_intel_windows) {
LOG_WARNING(Render_Vulkan, "Intel proprietary drivers do not support MSAA image blits");
LOG_WARNING(Render_Vulkan,
"Intel proprietary drivers do not support MSAA->MSAA image blits. "
"MSAA scaling will use 3D helpers. MSAA resolves work normally.");
cant_blit_msaa = true;
}
has_broken_compute =
CheckBrokenCompute(properties.driver.driverID, properties.properties.driverVersion) &&
!Settings::values.enable_compute_pipelines.GetValue();
if (is_intel_anv || (is_qualcomm && !is_s8gen2)) {
LOG_WARNING(Render_Vulkan, "Driver does not support native BGR format");
must_emulate_bgr565 = false; // Default: assume emulation isn't required
if (is_intel_anv) {
LOG_WARNING(Render_Vulkan, "Intel ANV driver does not support native BGR format");
must_emulate_bgr565 = true;
} else if (is_qualcomm) {
LOG_WARNING(Render_Vulkan,
"Qualcomm driver mishandles BGR5 formats even with VK_KHR_maintenance5, forcing emulation");
must_emulate_bgr565 = true;
} else if (is_arm) {
LOG_WARNING(Render_Vulkan,
"ARM Mali driver mishandles BGR5 formats even with VK_KHR_maintenance5, forcing emulation");
must_emulate_bgr565 = true;
}
@@ -619,51 +634,55 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
(std::min)(properties.properties.limits.maxVertexInputBindings, 16U);
}
if (is_turnip) {
LOG_WARNING(Render_Vulkan, "Turnip requires higher-than-reported binding limits");
if (is_turnip || is_qualcomm) {
LOG_WARNING(Render_Vulkan, "Driver requires higher-than-reported binding limits");
properties.properties.limits.maxVertexInputBindings = 32;
}
if (!extensions.extended_dynamic_state && extensions.extended_dynamic_state2) {
LOG_INFO(Render_Vulkan,
"Removing extendedDynamicState2 due to missing extendedDynamicState");
RemoveExtensionFeature(extensions.extended_dynamic_state2, features.extended_dynamic_state2,
VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME);
}
if (!extensions.extended_dynamic_state2 && extensions.extended_dynamic_state3) {
LOG_INFO(Render_Vulkan,
"Removing extendedDynamicState3 due to missing extendedDynamicState2");
RemoveExtensionFeature(extensions.extended_dynamic_state3, features.extended_dynamic_state3,
VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME);
dynamic_state3_blending = false;
dynamic_state3_enables = false;
}
// Mesa Intel drivers on UHD 620 have broken extended dynamic state that causes extreme flickering;
// it is unknown whether other iGPUs are affected. All Intel UHD driver versions on Windows 10+ are
// also affected and can crash outright, so dynamic state is forced off for both.
// This may be revisited later, e.g. by gating Mesa drivers on `< VK_MAKE_API_VERSION(26, 0, 0)` once fixed.
if ((is_mvk || (is_integrated && is_intel_anv) || (is_integrated && is_intel_windows)) && Settings::values.dyna_state.GetValue() != 0) {
LOG_WARNING(Render_Vulkan, "Driver has broken dynamic state, forcing to 0 to prevent graphical issues");
Settings::values.dyna_state.SetValue(0);
}
// Base dynamic states (VIEWPORT, SCISSOR, DEPTH_BIAS, etc.) are always active in vk_graphics_pipeline.cpp.
// This slider controls the EXTENDED dynamic states in cumulative levels:
// Level 0 = Core Dynamic States only (Vulkan 1.0)
// Level 1 = Core + VK_EXT_extended_dynamic_state
// Level 2 = Core + VK_EXT_extended_dynamic_state + VK_EXT_extended_dynamic_state2
// Level 3 = Core + VK_EXT_extended_dynamic_state + VK_EXT_extended_dynamic_state2 + VK_EXT_extended_dynamic_state3
switch (Settings::values.dyna_state.GetValue()) {
case 0:
RemoveExtensionFeature(extensions.extended_dynamic_state, features.extended_dynamic_state, VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME);
[[fallthrough]];
case 1:
RemoveExtensionFeature(extensions.extended_dynamic_state2, features.extended_dynamic_state2, VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME);
[[fallthrough]];
case 2:
RemoveExtensionFeature(extensions.extended_dynamic_state3, features.extended_dynamic_state3, VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME);
// Level 0: Disable all extended dynamic state extensions
RemoveExtensionFeature(extensions.extended_dynamic_state, features.extended_dynamic_state,
VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME);
RemoveExtensionFeature(extensions.extended_dynamic_state2, features.extended_dynamic_state2,
VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME);
RemoveExtensionFeature(extensions.extended_dynamic_state3, features.extended_dynamic_state3,
VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME);
dynamic_state3_blending = false;
dynamic_state3_enables = false;
break;
case 1:
// Level 1: Enable EDS1, disable EDS2 and EDS3
RemoveExtensionFeature(extensions.extended_dynamic_state2, features.extended_dynamic_state2,
VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME);
RemoveExtensionFeature(extensions.extended_dynamic_state3, features.extended_dynamic_state3,
VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME);
dynamic_state3_blending = false;
dynamic_state3_enables = false;
break;
case 2:
// Level 2: Enable EDS1 + EDS2, disable EDS3
RemoveExtensionFeature(extensions.extended_dynamic_state3, features.extended_dynamic_state3,
VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME);
dynamic_state3_blending = false;
dynamic_state3_enables = false;
break;
case 3:
default:
// Level 3: Enable all (EDS1 + EDS2 + EDS3)
break;
}
if (!Settings::values.vertex_input_dynamic_state.GetValue() || !extensions.extended_dynamic_state) {
// VK_EXT_vertex_input_dynamic_state is independent of EDS
// It can be enabled even without extended_dynamic_state
if (!Settings::values.vertex_input_dynamic_state.GetValue()) {
RemoveExtensionFeature(extensions.vertex_input_dynamic_state, features.vertex_input_dynamic_state, VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME);
}
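As a compact restatement of the cumulative dyna_state levels handled by the switch above (illustrative helper, not device code):

struct DynaStateLevel {
    bool eds1; // VK_EXT_extended_dynamic_state
    bool eds2; // VK_EXT_extended_dynamic_state2
    bool eds3; // VK_EXT_extended_dynamic_state3
};

// Levels 0-3 are cumulative: each level keeps everything from the previous one.
constexpr DynaStateLevel LevelExtensions(int level) {
    return DynaStateLevel{level >= 1, level >= 2, level >= 3};
}
static_assert(!LevelExtensions(0).eds1 && LevelExtensions(3).eds3);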
@@ -751,8 +770,8 @@ void Device::SaveShader(std::span<const u32> spirv) const {
}
bool Device::ComputeIsOptimalAstcSupported() const {
// Disable for now to avoid converting ASTC twice.
static constexpr std::array astc_formats = {
// Verify hardware supports all ASTC formats with optimal tiling to avoid software conversion
static constexpr std::array<VkFormat, 28> astc_formats = {
VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
VK_FORMAT_ASTC_5x4_UNORM_BLOCK, VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
VK_FORMAT_ASTC_5x5_UNORM_BLOCK, VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
@@ -771,9 +790,10 @@ bool Device::ComputeIsOptimalAstcSupported() const {
if (!features.features.textureCompressionASTC_LDR) {
return false;
}
const auto format_feature_usage{
VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_BLIT_SRC_BIT |
VK_FORMAT_FEATURE_BLIT_DST_BIT | VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
const auto format_feature_usage{VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
VK_FORMAT_FEATURE_BLIT_SRC_BIT |
VK_FORMAT_FEATURE_BLIT_DST_BIT |
VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
VK_FORMAT_FEATURE_TRANSFER_DST_BIT};
for (const auto format : astc_formats) {
const auto physical_format_properties{physical.GetFormatProperties(format)};
@@ -970,7 +990,7 @@ bool Device::GetSuitability(bool requires_swapchain) {
// Set next pointer.
void** next = &features2.pNext;
// Vulkan 1.2, 1.3 and 1.4 features
// Vulkan 1.2 and 1.3 features
if (instance_version >= VK_API_VERSION_1_2) {
features_1_2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
features_1_3.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES;
@@ -1076,6 +1096,16 @@ bool Device::GetSuitability(bool requires_swapchain) {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT;
SetNext(next, properties.transform_feedback);
}
if (extensions.maintenance5) {
properties.maintenance5.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES_KHR;
SetNext(next, properties.maintenance5);
}
if (extensions.multi_draw) {
properties.multi_draw.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT;
SetNext(next, properties.multi_draw);
}
// Perform the property fetch.
physical.GetProperties2(properties2);
@@ -1108,14 +1138,75 @@ bool Device::GetSuitability(bool requires_swapchain) {
}
}
// VK_DYNAMIC_STATE
// Driver detection variables for workarounds in GetSuitability
const VkDriverId driver_id = properties.driver.driverID;
[[maybe_unused]] const bool is_amd_driver =
driver_id == VK_DRIVER_ID_AMD_PROPRIETARY || driver_id == VK_DRIVER_ID_AMD_OPEN_SOURCE;
const bool is_intel_windows = driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS;
[[maybe_unused]] const bool is_qualcomm = driver_id == VK_DRIVER_ID_QUALCOMM_PROPRIETARY;
// Drivers that need VK_EXT_extended_dynamic_state2 workarounds are handled below.
// Drivers that need VK_EXT_extended_dynamic_state3 workarounds are handled below.
[[maybe_unused]] const auto device_id = properties.properties.deviceID;
// Samsung: Broken extendedDynamicState3ColorBlendEquation
// Disable blend equation dynamic state, force static pipeline state
if (extensions.extended_dynamic_state3 &&
(driver_id == VK_DRIVER_ID_SAMSUNG_PROPRIETARY)) {
LOG_WARNING(Render_Vulkan,
"Samsung: Disabling broken extendedDynamicState3ColorBlendEquation");
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEnable = false;
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEquation = false;
}
// Intel Windows < 27.20.100.0: Broken VertexInputDynamicState
// Disable VertexInputDynamicState on old Intel Windows drivers
if (extensions.vertex_input_dynamic_state && is_intel_windows) {
const u32 version = (properties.properties.driverVersion << 3) >> 3;
if (version < VK_MAKE_API_VERSION(27, 20, 100, 0)) {
LOG_WARNING(Render_Vulkan,
"Intel Windows < 27.20.100.0: Disabling broken VK_EXT_vertex_input_dynamic_state");
RemoveExtensionFeature(extensions.vertex_input_dynamic_state,
features.vertex_input_dynamic_state,
VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME);
}
}
if (Settings::values.dyna_state.GetValue() == 0) {
LOG_INFO(Render_Vulkan, "Extended Dynamic State disabled by user setting, clearing all EDS features");
features.custom_border_color.customBorderColors = false;
features.custom_border_color.customBorderColorWithoutFormat = false;
features.extended_dynamic_state.extendedDynamicState = false;
features.extended_dynamic_state2.extendedDynamicState2 = false;
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEnable = false;
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEquation = false;
features.extended_dynamic_state3.extendedDynamicState3ColorWriteMask = false;
features.extended_dynamic_state3.extendedDynamicState3DepthClampEnable = false;
features.extended_dynamic_state3.extendedDynamicState3LogicOpEnable = false;
}
// Return whether we were suitable.
return suitable;
}
void Device::RemoveUnsuitableExtensions() {
// VK_EXT_custom_border_color
extensions.custom_border_color = features.custom_border_color.customBorderColors &&
features.custom_border_color.customBorderColorWithoutFormat;
// Enable extension if driver supports it, then check individual features
// - customBorderColors: Required to use VK_BORDER_COLOR_FLOAT_CUSTOM_EXT
// - customBorderColorWithoutFormat: Optional, allows VK_FORMAT_UNDEFINED
// If only customBorderColors is available, we must provide a specific format
if (extensions.custom_border_color) {
// Verify that at least customBorderColors is available
if (!features.custom_border_color.customBorderColors) {
LOG_WARNING(Render_Vulkan,
"VK_EXT_custom_border_color reported but customBorderColors feature not available, disabling");
extensions.custom_border_color = false;
}
}
RemoveExtensionFeatureIfUnsuitable(extensions.custom_border_color, features.custom_border_color,
VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME);
@@ -1144,21 +1235,79 @@ void Device::RemoveUnsuitableExtensions() {
VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME);
// VK_EXT_extended_dynamic_state3
dynamic_state3_blending =
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEnable &&
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEquation &&
const bool supports_color_blend_enable =
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEnable;
const bool supports_color_blend_equation =
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEquation;
const bool supports_color_write_mask =
features.extended_dynamic_state3.extendedDynamicState3ColorWriteMask;
dynamic_state3_enables =
features.extended_dynamic_state3.extendedDynamicState3DepthClampEnable &&
dynamic_state3_blending = supports_color_blend_enable && supports_color_blend_equation &&
supports_color_write_mask;
const bool supports_depth_clamp_enable =
features.extended_dynamic_state3.extendedDynamicState3DepthClampEnable;
const bool supports_logic_op_enable =
features.extended_dynamic_state3.extendedDynamicState3LogicOpEnable;
const bool supports_line_raster_mode =
features.extended_dynamic_state3.extendedDynamicState3LineRasterizationMode &&
extensions.line_rasterization && features.line_rasterization.rectangularLines;
const bool supports_conservative_raster_mode =
features.extended_dynamic_state3.extendedDynamicState3ConservativeRasterizationMode &&
extensions.conservative_rasterization;
const bool supports_line_stipple_enable =
features.extended_dynamic_state3.extendedDynamicState3LineStippleEnable &&
extensions.line_rasterization && features.line_rasterization.stippledRectangularLines;
const bool supports_alpha_to_coverage =
features.extended_dynamic_state3.extendedDynamicState3AlphaToCoverageEnable;
const bool supports_alpha_to_one =
features.extended_dynamic_state3.extendedDynamicState3AlphaToOneEnable &&
features.features.alphaToOne;
dynamic_state3_depth_clamp_enable = supports_depth_clamp_enable;
dynamic_state3_logic_op_enable = supports_logic_op_enable;
dynamic_state3_line_raster_mode = supports_line_raster_mode;
dynamic_state3_conservative_raster_mode = supports_conservative_raster_mode;
dynamic_state3_line_stipple_enable = supports_line_stipple_enable;
dynamic_state3_alpha_to_coverage = supports_alpha_to_coverage;
dynamic_state3_alpha_to_one = supports_alpha_to_one;
dynamic_state3_enables = dynamic_state3_depth_clamp_enable || dynamic_state3_logic_op_enable ||
dynamic_state3_line_raster_mode ||
dynamic_state3_conservative_raster_mode ||
dynamic_state3_line_stipple_enable ||
dynamic_state3_alpha_to_coverage || dynamic_state3_alpha_to_one;
extensions.extended_dynamic_state3 = dynamic_state3_blending || dynamic_state3_enables;
dynamic_state3_blending = dynamic_state3_blending && extensions.extended_dynamic_state3;
dynamic_state3_enables = dynamic_state3_enables && extensions.extended_dynamic_state3;
if (!extensions.extended_dynamic_state3) {
dynamic_state3_blending = false;
dynamic_state3_enables = false;
dynamic_state3_depth_clamp_enable = false;
dynamic_state3_logic_op_enable = false;
dynamic_state3_line_raster_mode = false;
dynamic_state3_conservative_raster_mode = false;
dynamic_state3_line_stipple_enable = false;
dynamic_state3_alpha_to_coverage = false;
dynamic_state3_alpha_to_one = false;
}
RemoveExtensionFeatureIfUnsuitable(extensions.extended_dynamic_state3,
features.extended_dynamic_state3,
VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME);
// VK_EXT_robustness2
// Enable if at least one robustness2 feature is available
extensions.robustness_2 = features.robustness2.robustBufferAccess2 ||
features.robustness2.robustImageAccess2 ||
features.robustness2.nullDescriptor;
RemoveExtensionFeatureIfUnsuitable(extensions.robustness_2, features.robustness2,
VK_EXT_ROBUSTNESS_2_EXTENSION_NAME);
// VK_EXT_image_robustness
// Enable if robustImageAccess is available
extensions.image_robustness = features.image_robustness.robustImageAccess;
RemoveExtensionFeatureIfUnsuitable(extensions.image_robustness, features.image_robustness,
VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME);
// VK_EXT_provoking_vertex
if (Settings::values.provoking_vertex.GetValue()) {
extensions.provoking_vertex = features.provoking_vertex.provokingVertexLast
@@ -1196,15 +1345,21 @@ void Device::RemoveUnsuitableExtensions() {
VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME);
// VK_EXT_transform_feedback
// We only require the basic transformFeedback feature and at least
// one transform feedback buffer. We keep transformFeedbackQueries as it's used by
// the streaming byte count implementation. GeometryStreams and multiple streams
// are not strictly required since we currently support only stream 0.
extensions.transform_feedback =
features.transform_feedback.transformFeedback &&
features.transform_feedback.geometryStreams &&
properties.transform_feedback.maxTransformFeedbackStreams >= 4 &&
properties.transform_feedback.maxTransformFeedbackBuffers > 0 &&
properties.transform_feedback.transformFeedbackQueries &&
properties.transform_feedback.transformFeedbackDraw;
properties.transform_feedback.transformFeedbackQueries;
RemoveExtensionFeatureIfUnsuitable(extensions.transform_feedback, features.transform_feedback,
VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME);
if (extensions.transform_feedback) {
LOG_INFO(Render_Vulkan, "VK_EXT_transform_feedback enabled (buffers={}, queries={})",
properties.transform_feedback.maxTransformFeedbackBuffers,
properties.transform_feedback.transformFeedbackQueries);
}
// VK_EXT_vertex_input_dynamic_state
extensions.vertex_input_dynamic_state =
@@ -1213,6 +1368,17 @@ void Device::RemoveUnsuitableExtensions() {
features.vertex_input_dynamic_state,
VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME);
// VK_EXT_multi_draw
extensions.multi_draw = features.multi_draw.multiDraw;
if (extensions.multi_draw) {
LOG_INFO(Render_Vulkan, "VK_EXT_multi_draw: maxMultiDrawCount={}",
properties.multi_draw.maxMultiDrawCount);
}
RemoveExtensionFeatureIfUnsuitable(extensions.multi_draw, features.multi_draw,
VK_EXT_MULTI_DRAW_EXTENSION_NAME);
// VK_KHR_pipeline_executable_properties
if (Settings::values.renderer_shader_feedback.GetValue()) {
extensions.pipeline_executable_properties =
@@ -1236,6 +1402,76 @@ void Device::RemoveUnsuitableExtensions() {
RemoveExtensionFeatureIfUnsuitable(extensions.workgroup_memory_explicit_layout,
features.workgroup_memory_explicit_layout,
VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_EXTENSION_NAME);
// VK_EXT_swapchain_maintenance1 (device extension with a feature struct; not promoted to core)
// Requires VK_EXT_surface_maintenance1 instance extension
extensions.swapchain_maintenance1 = features.swapchain_maintenance1.swapchainMaintenance1;
if (extensions.swapchain_maintenance1) {
// Check if VK_EXT_surface_maintenance1 instance extension is available
const auto instance_extensions = vk::EnumerateInstanceExtensionProperties(dld);
const bool has_surface_maintenance1 = instance_extensions && std::ranges::any_of(*instance_extensions,
[](const VkExtensionProperties& prop) {
return std::strcmp(prop.extensionName, VK_EXT_SURFACE_MAINTENANCE_1_EXTENSION_NAME) == 0;
});
if (!has_surface_maintenance1) {
LOG_WARNING(Render_Vulkan,
"VK_EXT_swapchain_maintenance1 requires VK_EXT_surface_maintenance1, disabling");
extensions.swapchain_maintenance1 = false;
features.swapchain_maintenance1.swapchainMaintenance1 = false;
}
}
RemoveExtensionFeatureIfUnsuitable(extensions.swapchain_maintenance1, features.swapchain_maintenance1,
VK_EXT_SWAPCHAIN_MAINTENANCE_1_EXTENSION_NAME);
// VK_KHR_maintenance1 (core in Vulkan 1.1, no features)
extensions.maintenance1 = loaded_extensions.contains(VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
RemoveExtensionIfUnsuitable(extensions.maintenance1, VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
// VK_KHR_maintenance2 (core in Vulkan 1.1, no features)
extensions.maintenance2 = loaded_extensions.contains(VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
RemoveExtensionIfUnsuitable(extensions.maintenance2, VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
// VK_KHR_maintenance3 (core in Vulkan 1.1, no features)
extensions.maintenance3 = loaded_extensions.contains(VK_KHR_MAINTENANCE_3_EXTENSION_NAME);
RemoveExtensionIfUnsuitable(extensions.maintenance3, VK_KHR_MAINTENANCE_3_EXTENSION_NAME);
// VK_KHR_maintenance4
extensions.maintenance4 = features.maintenance4.maintenance4;
RemoveExtensionFeatureIfUnsuitable(extensions.maintenance4, features.maintenance4,
VK_KHR_MAINTENANCE_4_EXTENSION_NAME);
// VK_KHR_maintenance5
extensions.maintenance5 = features.maintenance5.maintenance5;
if (extensions.maintenance5) {
LOG_INFO(Render_Vulkan, "VK_KHR_maintenance5 properties: polygonModePointSize={} "
"depthStencilSwizzleOne={} earlyFragmentTests={} nonStrictWideLines={}",
properties.maintenance5.polygonModePointSize,
properties.maintenance5.depthStencilSwizzleOneSupport,
properties.maintenance5.earlyFragmentMultisampleCoverageAfterSampleCounting &&
properties.maintenance5.earlyFragmentSampleMaskTestBeforeSampleCounting,
properties.maintenance5.nonStrictWideLinesUseParallelogram);
}
RemoveExtensionFeatureIfUnsuitable(extensions.maintenance5, features.maintenance5,
VK_KHR_MAINTENANCE_5_EXTENSION_NAME);
// VK_KHR_maintenance6
extensions.maintenance6 = features.maintenance6.maintenance6;
RemoveExtensionFeatureIfUnsuitable(extensions.maintenance6, features.maintenance6,
VK_KHR_MAINTENANCE_6_EXTENSION_NAME);
// VK_KHR_maintenance7 (proposed for Vulkan 1.4, no features)
extensions.maintenance7 = loaded_extensions.contains(VK_KHR_MAINTENANCE_7_EXTENSION_NAME);
RemoveExtensionIfUnsuitable(extensions.maintenance7, VK_KHR_MAINTENANCE_7_EXTENSION_NAME);
// VK_KHR_maintenance8 (proposed for Vulkan 1.4, no features)
extensions.maintenance8 = loaded_extensions.contains(VK_KHR_MAINTENANCE_8_EXTENSION_NAME);
RemoveExtensionIfUnsuitable(extensions.maintenance8, VK_KHR_MAINTENANCE_8_EXTENSION_NAME);
// VK_KHR_maintenance9 (proposed for Vulkan 1.4, no features)
extensions.maintenance9 = loaded_extensions.contains(VK_KHR_MAINTENANCE_9_EXTENSION_NAME);
RemoveExtensionIfUnsuitable(extensions.maintenance9, VK_KHR_MAINTENANCE_9_EXTENSION_NAME);
}
void Device::SetupFamilies(VkSurfaceKHR surface) {

View File

@@ -37,9 +37,13 @@ VK_DEFINE_HANDLE(VmaAllocator)
FEATURE(KHR, TimelineSemaphore, TIMELINE_SEMAPHORE, timeline_semaphore)
#define FOR_EACH_VK_FEATURE_1_3(FEATURE) \
FEATURE(EXT, ImageRobustness, IMAGE_ROBUSTNESS, image_robustness) \
FEATURE(EXT, ShaderDemoteToHelperInvocation, SHADER_DEMOTE_TO_HELPER_INVOCATION, \
shader_demote_to_helper_invocation) \
FEATURE(EXT, SubgroupSizeControl, SUBGROUP_SIZE_CONTROL, subgroup_size_control)
FEATURE(EXT, SubgroupSizeControl, SUBGROUP_SIZE_CONTROL, subgroup_size_control) \
FEATURE(KHR, Maintenance4, MAINTENANCE_4, maintenance4)
#define FOR_EACH_VK_FEATURE_1_4(FEATURE)
// Define all features which may be used by the implementation and require an extension here.
#define FOR_EACH_VK_FEATURE_EXT(FEATURE) \
@@ -52,12 +56,16 @@ VK_DEFINE_HANDLE(VmaAllocator)
FEATURE(EXT, 4444Formats, 4444_FORMATS, format_a4b4g4r4) \
FEATURE(EXT, IndexTypeUint8, INDEX_TYPE_UINT8, index_type_uint8) \
FEATURE(EXT, LineRasterization, LINE_RASTERIZATION, line_rasterization) \
FEATURE(EXT, MultiDraw, MULTI_DRAW, multi_draw) \
FEATURE(EXT, PrimitiveTopologyListRestart, PRIMITIVE_TOPOLOGY_LIST_RESTART, \
primitive_topology_list_restart) \
FEATURE(EXT, ProvokingVertex, PROVOKING_VERTEX, provoking_vertex) \
FEATURE(EXT, Robustness2, ROBUSTNESS_2, robustness2) \
FEATURE(EXT, TransformFeedback, TRANSFORM_FEEDBACK, transform_feedback) \
FEATURE(EXT, VertexInputDynamicState, VERTEX_INPUT_DYNAMIC_STATE, vertex_input_dynamic_state) \
FEATURE(EXT, SwapchainMaintenance1, SWAPCHAIN_MAINTENANCE_1, swapchain_maintenance1) \
FEATURE(KHR, Maintenance5, MAINTENANCE_5, maintenance5) \
FEATURE(KHR, Maintenance6, MAINTENANCE_6, maintenance6) \
FEATURE(KHR, PipelineExecutableProperties, PIPELINE_EXECUTABLE_PROPERTIES, \
pipeline_executable_properties) \
FEATURE(KHR, WorkgroupMemoryExplicitLayout, WORKGROUP_MEMORY_EXPLICIT_LAYOUT, \
@@ -84,6 +92,12 @@ VK_DEFINE_HANDLE(VmaAllocator)
EXTENSION(KHR, SWAPCHAIN, swapchain) \
EXTENSION(KHR, SWAPCHAIN_MUTABLE_FORMAT, swapchain_mutable_format) \
EXTENSION(KHR, IMAGE_FORMAT_LIST, image_format_list) \
EXTENSION(KHR, MAINTENANCE_1, maintenance1) \
EXTENSION(KHR, MAINTENANCE_2, maintenance2) \
EXTENSION(KHR, MAINTENANCE_3, maintenance3) \
EXTENSION(KHR, MAINTENANCE_7, maintenance7) \
EXTENSION(KHR, MAINTENANCE_8, maintenance8) \
EXTENSION(KHR, MAINTENANCE_9, maintenance9) \
EXTENSION(NV, DEVICE_DIAGNOSTICS_CONFIG, device_diagnostics_config) \
EXTENSION(NV, GEOMETRY_SHADER_PASSTHROUGH, geometry_shader_passthrough) \
EXTENSION(NV, VIEWPORT_ARRAY2, viewport_array2) \
@@ -110,6 +124,7 @@ VK_DEFINE_HANDLE(VmaAllocator)
EXTENSION_NAME(VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_4444_FORMATS_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_ROBUSTNESS_2_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME) \
@@ -161,6 +176,7 @@ VK_DEFINE_HANDLE(VmaAllocator)
FEATURE_NAME(depth_bias_control, depthBiasExact) \
FEATURE_NAME(extended_dynamic_state, extendedDynamicState) \
FEATURE_NAME(format_a4b4g4r4, formatA4B4G4R4) \
FEATURE_NAME(image_robustness, robustImageAccess) \
FEATURE_NAME(index_type_uint8, indexTypeUint8) \
FEATURE_NAME(primitive_topology_list_restart, primitiveTopologyListRestart) \
FEATURE_NAME(provoking_vertex, provokingVertexLast) \
@@ -440,6 +456,11 @@ public:
return extensions.swapchain_mutable_format;
}
/// Returns true if VK_EXT_swapchain_maintenance1 is enabled.
bool IsExtSwapchainMaintenance1Enabled() const {
return extensions.swapchain_maintenance1;
}
/// Returns true if VK_KHR_shader_float_controls is enabled.
bool IsKhrShaderFloatControlsSupported() const {
return extensions.shader_float_controls;
@@ -476,10 +497,18 @@ public:
}
/// Returns true if the device supports VK_EXT_shader_stencil_export.
/// Note: Most Mali/NVIDIA drivers don't support this. Use hardware blits as fallback.
bool IsExtShaderStencilExportSupported() const {
return extensions.shader_stencil_export;
}
/// Returns true if depth/stencil operations can be performed efficiently.
/// Either through shader export or hardware blits.
bool CanPerformDepthStencilOperations() const {
return extensions.shader_stencil_export || is_blit_depth24_stencil8_supported ||
is_blit_depth32_stencil8_supported;
}
/// Returns true if the device supports VK_EXT_depth_range_unrestricted.
bool IsExtDepthRangeUnrestrictedSupported() const {
return extensions.depth_range_unrestricted;
@@ -520,6 +549,46 @@ public:
return extensions.custom_border_color;
}
/// Returns true if the device supports VK_EXT_image_robustness.
bool IsExtImageRobustnessSupported() const {
return extensions.image_robustness;
}
/// Returns true if robustImageAccess is supported.
bool IsRobustImageAccessSupported() const {
return features.image_robustness.robustImageAccess;
}
/// Returns true if the device supports VK_EXT_robustness2.
bool IsExtRobustness2Supported() const {
return extensions.robustness_2;
}
/// Returns true if robustBufferAccess2 is supported.
bool IsRobustBufferAccess2Supported() const {
return features.robustness2.robustBufferAccess2;
}
/// Returns true if robustImageAccess2 is supported.
bool IsRobustImageAccess2Supported() const {
return features.robustness2.robustImageAccess2;
}
/// Returns true if nullDescriptor is supported.
bool IsNullDescriptorSupported() const {
return features.robustness2.nullDescriptor;
}
/// Returns true if customBorderColors feature is available.
bool IsCustomBorderColorsSupported() const {
return features.custom_border_color.customBorderColors;
}
/// Returns true if customBorderColorWithoutFormat feature is available.
bool IsCustomBorderColorWithoutFormatSupported() const {
return features.custom_border_color.customBorderColorWithoutFormat;
}
/// Returns true if the device supports VK_EXT_extended_dynamic_state.
bool IsExtExtendedDynamicStateSupported() const {
return extensions.extended_dynamic_state;
@@ -569,6 +638,55 @@ public:
return extensions.line_rasterization;
}
bool SupportsRectangularLines() const {
return features.line_rasterization.rectangularLines != VK_FALSE;
}
bool SupportsSmoothLines() const {
return features.line_rasterization.smoothLines != VK_FALSE;
}
bool SupportsStippledRectangularLines() const {
return features.line_rasterization.stippledRectangularLines != VK_FALSE;
}
bool SupportsAlphaToOne() const {
return features.features.alphaToOne != VK_FALSE;
}
bool SupportsDynamicState3DepthClampEnable() const {
return dynamic_state3_depth_clamp_enable;
}
bool SupportsDynamicState3LogicOpEnable() const {
return dynamic_state3_logic_op_enable;
}
bool SupportsDynamicState3LineRasterizationMode() const {
return dynamic_state3_line_raster_mode;
}
bool SupportsDynamicState3ConservativeRasterizationMode() const {
return dynamic_state3_conservative_raster_mode;
}
bool SupportsDynamicState3LineStippleEnable() const {
return dynamic_state3_line_stipple_enable;
}
bool SupportsDynamicState3AlphaToCoverageEnable() const {
return dynamic_state3_alpha_to_coverage;
}
bool SupportsDynamicState3AlphaToOneEnable() const {
return dynamic_state3_alpha_to_one;
}
/// Returns true when the user enabled extended dynamic states (dyna_state level > 0).
bool UsesAdvancedCoreDynamicState() const {
return Settings::values.dyna_state.GetValue() > 0;
}
/// Returns true if the device supports VK_EXT_vertex_input_dynamic_state.
bool IsExtVertexInputDynamicStateSupported() const {
return extensions.vertex_input_dynamic_state;
@@ -703,6 +821,73 @@ public:
return features2.features.multiViewport;
}
/// Returns true if the device supports VK_KHR_maintenance1.
bool IsKhrMaintenance1Supported() const {
return extensions.maintenance1;
}
/// Returns true if the device supports VK_KHR_maintenance2.
bool IsKhrMaintenance2Supported() const {
return extensions.maintenance2;
}
/// Returns true if the device supports VK_KHR_maintenance3.
bool IsKhrMaintenance3Supported() const {
return extensions.maintenance3;
}
/// Returns true if the device supports VK_KHR_maintenance4.
bool IsKhrMaintenance4Supported() const {
return extensions.maintenance4;
}
/// Returns true if the device supports VK_KHR_maintenance5.
bool IsKhrMaintenance5Supported() const {
return extensions.maintenance5;
}
/// Returns true if polygon mode POINT supports gl_PointSize.
bool SupportsPolygonModePointSize() const {
return extensions.maintenance5 && properties.maintenance5.polygonModePointSize;
}
/// Returns true if depth/stencil swizzle ONE is supported.
bool SupportsDepthStencilSwizzleOne() const {
return extensions.maintenance5 && properties.maintenance5.depthStencilSwizzleOneSupport;
}
/// Returns true if early fragment tests optimizations are available.
bool SupportsEarlyFragmentTests() const {
return extensions.maintenance5 &&
properties.maintenance5.earlyFragmentMultisampleCoverageAfterSampleCounting &&
properties.maintenance5.earlyFragmentSampleMaskTestBeforeSampleCounting;
}
/// Returns true if the device supports VK_KHR_maintenance6.
bool IsKhrMaintenance6Supported() const {
return extensions.maintenance6;
}
/// Returns true if the device supports VK_EXT_multi_draw.
bool IsExtMultiDrawSupported() const {
return extensions.multi_draw;
}
/// Returns true if the device supports VK_KHR_maintenance7.
bool IsKhrMaintenance7Supported() const {
return extensions.maintenance7;
}
/// Returns true if the device supports VK_KHR_maintenance8.
bool IsKhrMaintenance8Supported() const {
return extensions.maintenance8;
}
/// Returns true if the device supports VK_KHR_maintenance9.
bool IsKhrMaintenance9Supported() const {
return extensions.maintenance9;
}
[[nodiscard]] static constexpr bool CheckBrokenCompute(VkDriverId driver_id,
u32 driver_version) {
if (driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS) {
@@ -786,6 +971,7 @@ private:
FOR_EACH_VK_FEATURE_1_1(FEATURE);
FOR_EACH_VK_FEATURE_1_2(FEATURE);
FOR_EACH_VK_FEATURE_1_3(FEATURE);
FOR_EACH_VK_FEATURE_1_4(FEATURE);
FOR_EACH_VK_FEATURE_EXT(FEATURE);
FOR_EACH_VK_EXTENSION(EXTENSION);
@@ -802,6 +988,7 @@ private:
FOR_EACH_VK_FEATURE_1_1(FEATURE_CORE);
FOR_EACH_VK_FEATURE_1_2(FEATURE_CORE);
FOR_EACH_VK_FEATURE_1_3(FEATURE_CORE);
FOR_EACH_VK_FEATURE_1_4(FEATURE_CORE);
FOR_EACH_VK_FEATURE_EXT(FEATURE_EXT);
#undef FEATURE_CORE
@@ -817,6 +1004,8 @@ private:
VkPhysicalDevicePushDescriptorPropertiesKHR push_descriptor{};
VkPhysicalDeviceSubgroupSizeControlProperties subgroup_size_control{};
VkPhysicalDeviceTransformFeedbackPropertiesEXT transform_feedback{};
VkPhysicalDeviceMaintenance5PropertiesKHR maintenance5{};
VkPhysicalDeviceMultiDrawPropertiesEXT multi_draw{};
VkPhysicalDeviceProperties properties{};
};
@@ -846,8 +1035,15 @@ private:
bool cant_blit_msaa{}; ///< Does not support MSAA<->MSAA blitting.
bool must_emulate_scaled_formats{}; ///< Requires scaled vertex format emulation
bool must_emulate_bgr565{}; ///< Emulates BGR565 by swizzling RGB565 format.
bool dynamic_state3_blending{}; ///< Has all blending features of dynamic_state3.
bool dynamic_state3_enables{}; ///< Has all enables features of dynamic_state3.
bool dynamic_state3_blending{}; ///< Has blending features of dynamic_state3.
bool dynamic_state3_enables{}; ///< Has at least one enable feature of dynamic_state3.
bool dynamic_state3_depth_clamp_enable{};
bool dynamic_state3_logic_op_enable{};
bool dynamic_state3_line_raster_mode{};
bool dynamic_state3_conservative_raster_mode{};
bool dynamic_state3_line_stipple_enable{};
bool dynamic_state3_alpha_to_coverage{};
bool dynamic_state3_alpha_to_one{};
bool supports_conditional_barriers{}; ///< Allows barriers in conditional control flow.
u64 device_access_memory{}; ///< Total size of device local memory in bytes.
u32 sets_per_pool{}; ///< Sets per descriptor pool

View File

@@ -116,6 +116,8 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkCmdDrawIndirectCount);
X(vkCmdDrawIndexedIndirectCount);
X(vkCmdDrawIndirectByteCountEXT);
X(vkCmdDrawMultiEXT);
X(vkCmdDrawMultiIndexedEXT);
X(vkCmdEndConditionalRenderingEXT);
X(vkCmdEndQuery);
X(vkCmdEndRenderPass);
@@ -145,6 +147,8 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkCmdSetDepthWriteEnableEXT);
X(vkCmdSetPrimitiveRestartEnableEXT);
X(vkCmdSetRasterizerDiscardEnableEXT);
X(vkCmdSetAlphaToCoverageEnableEXT);
X(vkCmdSetAlphaToOneEnableEXT);
X(vkCmdSetConservativeRasterizationModeEXT);
X(vkCmdSetLineRasterizationModeEXT);
X(vkCmdSetLineStippleEnableEXT);

View File

@@ -216,6 +216,8 @@ struct DeviceDispatch : InstanceDispatch {
PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount{};
PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount{};
PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT{};
PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT{};
PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT{};
PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT{};
PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT{};
PFN_vkCmdEndQuery vkCmdEndQuery{};
@@ -238,6 +240,8 @@ struct DeviceDispatch : InstanceDispatch {
PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT{};
PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT{};
PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT{};
PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT{};
PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT{};
PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT{};
PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT{};
PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT{};
@@ -1239,6 +1243,19 @@ public:
counter_buffer_offset, counter_offset, stride);
}
void DrawMultiEXT(u32 draw_count, const VkMultiDrawInfoEXT* vertex_info,
u32 instance_count, u32 first_instance, u32 stride) const noexcept {
dld->vkCmdDrawMultiEXT(handle, draw_count, vertex_info, instance_count, first_instance,
stride);
}
void DrawMultiIndexedEXT(u32 draw_count, const VkMultiDrawIndexedInfoEXT* index_info,
u32 instance_count, u32 first_instance, u32 stride,
const int32_t* vertex_offset) const noexcept {
dld->vkCmdDrawMultiIndexedEXT(handle, draw_count, index_info, instance_count,
first_instance, stride, vertex_offset);
}
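A brief usage sketch for the new multi-draw wrapper, assuming VK_EXT_multi_draw was enabled and cmdbuf is the vk::CommandBuffer wrapper from this header; the draw data is made up.

// Two non-indexed draws recorded with a single call.
const std::array<VkMultiDrawInfoEXT, 2> draws{{
    {.firstVertex = 0, .vertexCount = 3},
    {.firstVertex = 3, .vertexCount = 6},
}};
cmdbuf.DrawMultiEXT(static_cast<u32>(draws.size()), draws.data(),
                    /*instance_count=*/1, /*first_instance=*/0,
                    /*stride=*/sizeof(VkMultiDrawInfoEXT));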
void ClearAttachments(Span<VkClearAttachment> attachments,
Span<VkClearRect> rects) const noexcept {
dld->vkCmdClearAttachments(handle, attachments.size(), attachments.data(), rects.size(),
@@ -1471,6 +1488,14 @@ public:
dld->vkCmdSetLogicOpEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
}
void SetAlphaToCoverageEnableEXT(bool enable) const noexcept {
dld->vkCmdSetAlphaToCoverageEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
}
void SetAlphaToOneEnableEXT(bool enable) const noexcept {
dld->vkCmdSetAlphaToOneEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
}
void SetDepthClampEnableEXT(bool enable) const noexcept {
dld->vkCmdSetDepthClampEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
}