Compare commits
6 Commits: v0.0.4.tes ... v0.0.4-rc1

| Author | SHA1 | Date |
|---|---|---|
| | 15becaa5a3 | |
| | 3d4bb08cfc | |
| | b2ac18173f | |
| | 39f226a853 | |
| | 61ab1be0e7 | |
| | 3ba9769d7a | |
@@ -47,6 +47,26 @@ Various graphical filters exist - each of them aimed at a specific target/image

While stock shaders offer a basic subset of options for most users, programs such as [ReShade](https://github.com/crosire/reshade) offer a more flexible experience. In addition, users can seek out modifications (mods) that enhance the visual experience (60 FPS mods, HDR, etc.).

**Installing ReShade (Windows)**

1. [Download ReShade](https://reshade.me/#download) with add-on support.
   - [ReShade Setup 6.6.2 (Windows 64-bit) with add-on support](https://reshade.me/downloads/ReShade_Setup_6.6.2_Addon.exe)
   - SHA512 checksum: `1f09a73afa160480c13ffdd73cc04b1dc82943dddea58ad3bb9375f26b48c2787d0a85197e46b6fce32a4fd3472465520a3355ed3436241e17fba7ebaff7ffec`.
2. Open ReShade, click Browse, select the folder containing `eden.exe`, click Open, then click Next.
3. Select Vulkan as the rendering API and click Next.
4. On the "Select effects to install" screen, click Next without changing anything.
5. On the add-on selection screen, check the box for `Shader Toggler by Otis` ([GitHub](https://github.com/FransBouma/ShaderToggler)) and click Next.

**Using the Shader Toggler**

1. Launch a game; a ReShade pop-up should appear.
2. Progress to a point where a shader flickers.
3. Press the Home key to open the ReShade menu (or rebind it if your keyboard lacks one).
4. Navigate to the add-ons tab at the top of the ReShade menu.
5. At the bottom, open the Shader Toggler drop-down, max out the "# of frames collected" slider, then select "Change shaders" while looking at the flickering shader.
6. When the Shader Toggler finishes collecting frames (shown in the top left), press Numpad 2 until the flickering lines turn off.
7. Press Numpad 3 to add that shader to the group of shaders to turn off, then click Done and save all toggle groups.
8. Click the Edit button and select "Active at startup" so the shader is turned off on every game launch.
9. Press Caps Lock to toggle the shader manually (this is the default key; it can be changed with the Edit button from the previous step).

## Driver specifics

### Mesa environment variable hacks

@@ -188,7 +188,7 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
    }
    // Successfully dequeued a new buffer.
    {
        std::unique_lock lk{release_mutex};\
        std::unique_lock lk{release_mutex};
        queued_buffers--;
    }
    release_cv.notify_one();
@@ -237,9 +237,15 @@ u64 SinkStream::GetExpectedPlayedSampleCount() {

void SinkStream::WaitFreeSpace(std::stop_token stop_token) {
    std::unique_lock lk{release_mutex};
    release_cv.wait_for(lk, std::chrono::milliseconds(5), [this]() { return paused || queued_buffers < max_queue_size; });
    if (queued_buffers > max_queue_size + 3) {
        release_cv.wait(lk, stop_token, [this] { return paused || queued_buffers < max_queue_size; });

    const auto has_space = [this]() {
        const u32 current_size = queued_buffers.load(std::memory_order_relaxed);
        return paused || max_queue_size == 0 || current_size < max_queue_size;
    };

    if (!has_space()) {
        // Wait until the queue falls below the configured limit or the stream is paused/stopped.
        release_cv.wait(lk, stop_token, has_space);
    }
}

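The rewritten `WaitFreeSpace` above replaces a fixed 5 ms poll with a predicate wait that also honors the stream's stop token. Below is a minimal, self-contained sketch of the same backpressure pattern; the `BoundedQueue` class and its members are illustrative only and not part of the emulator's API.

```cpp
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <stop_token>

// Illustrative sketch of producer backpressure: block until the consumer has
// drained the queue below the limit, the stream is paused, or a stop is requested.
class BoundedQueue {
public:
    explicit BoundedQueue(unsigned limit) : max_queue_size{limit} {}

    void WaitFreeSpace(std::stop_token stop_token) {
        std::unique_lock lk{release_mutex};
        const auto has_space = [this] {
            return paused || max_queue_size == 0 ||
                   queued_buffers.load(std::memory_order_relaxed) < max_queue_size;
        };
        if (!has_space()) {
            // condition_variable_any::wait with a stop_token returns early on a stop request.
            release_cv.wait(lk, stop_token, has_space);
        }
    }

    // Producer side: account for one more queued buffer.
    void Push() { queued_buffers.fetch_add(1, std::memory_order_relaxed); }

    // Consumer side: release one slot and wake a waiting producer.
    void Pop() {
        {
            std::unique_lock lk{release_mutex};
            queued_buffers.fetch_sub(1, std::memory_order_relaxed);
        }
        release_cv.notify_one();
    }

private:
    std::mutex release_mutex;
    std::condition_variable_any release_cv;
    std::atomic<unsigned> queued_buffers{0};
    unsigned max_queue_size;
    bool paused{false};
};
```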
@@ -240,7 +240,7 @@ private:
    /// Ring buffer of the samples waiting to be played or consumed
    Common::RingBuffer<s16, 0x10000> samples_buffer;
    /// Audio buffers queued and waiting to play
    Common::SPSCQueue<SinkBuffer, 0x40000> queue;
    Common::SPSCQueue<SinkBuffer, 0x10000> queue;
    /// The currently-playing audio buffer
    SinkBuffer playing_buffer{};
    /// The last played (or received) frame of audio, used when the callback underruns

@@ -153,6 +153,8 @@ add_library(
    wall_clock.h
    zstd_compression.cpp
    zstd_compression.h
    fs/ryujinx_compat.h fs/ryujinx_compat.cpp
    fs/symlink.h fs/symlink.cpp
)

if(WIN32)

@@ -33,6 +33,7 @@
#define SUDACHI_DIR "sudachi"
#define YUZU_DIR "yuzu"
#define SUYU_DIR "suyu"
#define RYUJINX_DIR "Ryujinx"

// yuzu-specific files
#define LOG_FILE "eden_log.txt"

@@ -84,7 +84,7 @@ public:
|
||||
return eden_paths.at(eden_path);
|
||||
}
|
||||
|
||||
[[nodiscard]] const fs::path& GetLegacyPathImpl(LegacyPath legacy_path) {
|
||||
[[nodiscard]] const fs::path& GetLegacyPathImpl(EmuPath legacy_path) {
|
||||
return legacy_paths.at(legacy_path);
|
||||
}
|
||||
|
||||
@@ -98,7 +98,7 @@ public:
|
||||
eden_paths.insert_or_assign(eden_path, new_path);
|
||||
}
|
||||
|
||||
void SetLegacyPathImpl(LegacyPath legacy_path, const fs::path& new_path) {
|
||||
void SetLegacyPathImpl(EmuPath legacy_path, const fs::path& new_path) {
|
||||
legacy_paths.insert_or_assign(legacy_path, new_path);
|
||||
}
|
||||
|
||||
@@ -118,9 +118,9 @@ public:
|
||||
}
|
||||
eden_path_cache = eden_path / CACHE_DIR;
|
||||
eden_path_config = eden_path / CONFIG_DIR;
|
||||
#define LEGACY_PATH(titleName, upperName) GenerateLegacyPath(LegacyPath::titleName##Dir, GetAppDataRoamingDirectory() / upperName##_DIR); \
|
||||
GenerateLegacyPath(LegacyPath::titleName##ConfigDir, GetAppDataRoamingDirectory() / upperName##_DIR / CONFIG_DIR); \
|
||||
GenerateLegacyPath(LegacyPath::titleName##CacheDir, GetAppDataRoamingDirectory() / upperName##_DIR / CACHE_DIR);
|
||||
#define LEGACY_PATH(titleName, upperName) GenerateLegacyPath(EmuPath::titleName##Dir, GetAppDataRoamingDirectory() / upperName##_DIR); \
|
||||
GenerateLegacyPath(EmuPath::titleName##ConfigDir, GetAppDataRoamingDirectory() / upperName##_DIR / CONFIG_DIR); \
|
||||
GenerateLegacyPath(EmuPath::titleName##CacheDir, GetAppDataRoamingDirectory() / upperName##_DIR / CACHE_DIR);
|
||||
LEGACY_PATH(Citron, CITRON)
|
||||
LEGACY_PATH(Sudachi, SUDACHI)
|
||||
LEGACY_PATH(Yuzu, YUZU)
|
||||
@@ -140,9 +140,9 @@ public:
|
||||
eden_path_cache = eden_path / CACHE_DIR;
|
||||
eden_path_config = eden_path / CONFIG_DIR;
|
||||
}
|
||||
#define LEGACY_PATH(titleName, upperName) GenerateLegacyPath(LegacyPath::titleName##Dir, GetDataDirectory("XDG_DATA_HOME") / upperName##_DIR); \
|
||||
GenerateLegacyPath(LegacyPath::titleName##ConfigDir, GetDataDirectory("XDG_CONFIG_HOME") / upperName##_DIR); \
|
||||
GenerateLegacyPath(LegacyPath::titleName##CacheDir, GetDataDirectory("XDG_CACHE_HOME") / upperName##_DIR);
|
||||
#define LEGACY_PATH(titleName, upperName) GenerateLegacyPath(EmuPath::titleName##Dir, GetDataDirectory("XDG_DATA_HOME") / upperName##_DIR); \
|
||||
GenerateLegacyPath(EmuPath::titleName##ConfigDir, GetDataDirectory("XDG_CONFIG_HOME") / upperName##_DIR); \
|
||||
GenerateLegacyPath(EmuPath::titleName##CacheDir, GetDataDirectory("XDG_CACHE_HOME") / upperName##_DIR);
|
||||
LEGACY_PATH(Citron, CITRON)
|
||||
LEGACY_PATH(Sudachi, SUDACHI)
|
||||
LEGACY_PATH(Yuzu, YUZU)
|
||||
@@ -165,6 +165,15 @@ public:
|
||||
GenerateEdenPath(EdenPath::ShaderDir, eden_path / SHADER_DIR);
|
||||
GenerateEdenPath(EdenPath::TASDir, eden_path / TAS_DIR);
|
||||
GenerateEdenPath(EdenPath::IconsDir, eden_path / ICONS_DIR);
|
||||
|
||||
#ifdef _WIN32
|
||||
GenerateLegacyPath(EmuPath::RyujinxDir, GetAppDataRoamingDirectory() / RYUJINX_DIR);
|
||||
#else
|
||||
// In Ryujinx's infinite wisdom, it places EVERYTHING in the config directory on UNIX
|
||||
// This is incredibly stupid and violates a million XDG standards, but whatever
|
||||
GenerateLegacyPath(EmuPath::RyujinxDir, GetDataDirectory("XDG_CONFIG_HOME") / RYUJINX_DIR);
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
private:
|
||||
@@ -179,12 +188,12 @@ private:
|
||||
SetEdenPathImpl(eden_path, new_path);
|
||||
}
|
||||
|
||||
void GenerateLegacyPath(LegacyPath legacy_path, const fs::path& new_path) {
|
||||
void GenerateLegacyPath(EmuPath legacy_path, const fs::path& new_path) {
|
||||
SetLegacyPathImpl(legacy_path, new_path);
|
||||
}
|
||||
|
||||
std::unordered_map<EdenPath, fs::path> eden_paths;
|
||||
std::unordered_map<LegacyPath, fs::path> legacy_paths;
|
||||
std::unordered_map<EmuPath, fs::path> legacy_paths;
|
||||
};
|
||||
|
||||
bool ValidatePath(const fs::path& path) {
|
||||
@@ -272,7 +281,7 @@ const fs::path& GetEdenPath(EdenPath eden_path) {
|
||||
return PathManagerImpl::GetInstance().GetEdenPathImpl(eden_path);
|
||||
}
|
||||
|
||||
const std::filesystem::path& GetLegacyPath(LegacyPath legacy_path) {
|
||||
const std::filesystem::path& GetLegacyPath(EmuPath legacy_path) {
|
||||
return PathManagerImpl::GetInstance().GetLegacyPathImpl(legacy_path);
|
||||
}
|
||||
|
||||
@@ -280,7 +289,7 @@ std::string GetEdenPathString(EdenPath eden_path) {
|
||||
return PathToUTF8String(GetEdenPath(eden_path));
|
||||
}
|
||||
|
||||
std::string GetLegacyPathString(LegacyPath legacy_path) {
|
||||
std::string GetLegacyPathString(EmuPath legacy_path) {
|
||||
return PathToUTF8String(GetLegacyPath(legacy_path));
|
||||
}
|
||||
|
||||
|
||||
@@ -32,22 +32,26 @@ enum class EdenPath {
|
||||
IconsDir, // Where Icons for Windows shortcuts are stored.
|
||||
};
|
||||
|
||||
enum LegacyPath {
|
||||
CitronDir, // Citron Directories for migration
|
||||
// migration/compat dirs
|
||||
enum EmuPath {
|
||||
CitronDir,
|
||||
CitronConfigDir,
|
||||
CitronCacheDir,
|
||||
|
||||
SudachiDir, // Sudachi Directories for migration
|
||||
SudachiDir,
|
||||
SudachiConfigDir,
|
||||
SudachiCacheDir,
|
||||
|
||||
YuzuDir, // Yuzu Directories for migration
|
||||
YuzuDir,
|
||||
YuzuConfigDir,
|
||||
YuzuCacheDir,
|
||||
|
||||
SuyuDir, // Suyu Directories for migration
|
||||
SuyuDir,
|
||||
SuyuConfigDir,
|
||||
SuyuCacheDir,
|
||||
|
||||
// used exclusively for save data linking
|
||||
RyujinxDir,
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -229,7 +233,7 @@ void SetAppDirectory(const std::string& app_directory);
|
||||
*
|
||||
* @returns The filesystem path associated with the LegacyPath enum.
|
||||
*/
|
||||
[[nodiscard]] const std::filesystem::path& GetLegacyPath(LegacyPath legacy_path);
|
||||
[[nodiscard]] const std::filesystem::path& GetLegacyPath(EmuPath legacy_path);
|
||||
|
||||
/**
|
||||
* Gets the filesystem path associated with the EdenPath enum as a UTF-8 encoded std::string.
|
||||
@@ -247,7 +251,7 @@ void SetAppDirectory(const std::string& app_directory);
|
||||
*
|
||||
* @returns The filesystem path associated with the LegacyPath enum as a UTF-8 encoded std::string.
|
||||
*/
|
||||
[[nodiscard]] std::string GetLegacyPathString(LegacyPath legacy_path);
|
||||
[[nodiscard]] std::string GetLegacyPathString(EmuPath legacy_path);
|
||||
|
||||
/**
|
||||
* Sets a new filesystem path associated with the EdenPath enum.
|
||||
|
||||
src/common/fs/ryujinx_compat.cpp (new file, 93 lines)
@@ -0,0 +1,93 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#include "ryujinx_compat.h"
#include "common/fs/path_util.h"
#include <cstddef>
#include <cstring>
#include <fmt/ranges.h>
#include <fstream>

namespace Common::FS {

namespace fs = std::filesystem;

fs::path GetKvdbPath()
{
    return GetLegacyPath(EmuPath::RyujinxDir) / "bis" / "system" / "save" / "8000000000000000" / "0"
           / "imkvdb.arc";
}

fs::path GetRyuSavePath(const u64 &save_id)
{
    std::string hex = fmt::format("{:016x}", save_id);

    // TODO: what's the difference between 0 and 1?
    return GetLegacyPath(EmuPath::RyujinxDir) / "bis" / "user" / "save" / hex / "0";
}

IMENReadResult ReadKvdb(const fs::path &path, std::vector<IMEN> &imens)
{
    std::ifstream kvdb{path, std::ios::binary | std::ios::ate};

    if (!kvdb) {
        return IMENReadResult::Nonexistent;
    }

    size_t file_size = kvdb.tellg();

    // IMKV header + 8 bytes
    if (file_size < 0xB) {
        return IMENReadResult::NoHeader;
    }

    // magic (not the wizard kind)
    kvdb.seekg(0, std::ios::beg);
    char header[12];
    kvdb.read(header, 12);

    if (std::memcmp(header, IMKV_MAGIC, 4) != 0) {
        return IMENReadResult::InvalidMagic;
    }

    // calculate num. of imens left
    std::size_t remaining = (file_size - 12);
    std::size_t num_imens = remaining / IMEN_SIZE;

    // File is misaligned and probably corrupt (rip)
    if (remaining % IMEN_SIZE != 0) {
        return IMENReadResult::Misaligned;
    }

    // if there aren't any IMENs, it's empty and we can safely no-op out of here
    if (num_imens == 0) {
        return IMENReadResult::NoImens;
    }

    imens.reserve(num_imens);

    // initially I wanted to do a struct, but imkvdb is 140 bytes
    // while the compiler will murder you if you try to align u64 to 4 bytes
    for (std::size_t i = 0; i < num_imens; ++i) {
        char magic[4];
        u64 title_id = 0;
        u64 save_id = 0;

        kvdb.read(magic, 4);
        if (std::memcmp(magic, IMEN_MAGIC, 4) != 0) {
            return IMENReadResult::InvalidMagic;
        }

        kvdb.ignore(0x8);
        kvdb.read(reinterpret_cast<char *>(&title_id), 8);
        kvdb.ignore(0x38);
        kvdb.read(reinterpret_cast<char *>(&save_id), 8);
        kvdb.ignore(0x38);

        imens.emplace_back(IMEN{title_id, save_id});
    }

    return IMENReadResult::Success;
}

} // namespace Common::FS

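A minimal usage sketch of the reader above; the caller, function name, and error handling here are illustrative only and are not taken from the Eden codebase.

```cpp
// Hypothetical caller: enumerate Ryujinx save-data entries for migration.
#include <cstdio>
#include <vector>
#include "common/fs/ryujinx_compat.h"

void ListRyujinxSaves() {
    std::vector<Common::FS::IMEN> imens;
    const auto result = Common::FS::ReadKvdb(Common::FS::GetKvdbPath(), imens);

    if (result != Common::FS::IMENReadResult::Success) {
        // Nonexistent, NoHeader, InvalidMagic, Misaligned or NoImens: nothing to migrate.
        return;
    }

    for (const auto& imen : imens) {
        // Each entry maps a title ID to the save ID Ryujinx stored it under.
        std::printf("title %016llx -> save dir %s\n",
                    static_cast<unsigned long long>(imen.title_id),
                    Common::FS::GetRyuSavePath(imen.save_id).string().c_str());
    }
}
```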
src/common/fs/ryujinx_compat.h (new file, 40 lines)
@@ -0,0 +1,40 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#pragma once

#include "common/common_types.h"
#include <filesystem>
#include <vector>

namespace fs = std::filesystem;

namespace Common::FS {

constexpr const char IMEN_MAGIC[4] = {0x49, 0x4d, 0x45, 0x4e};
constexpr const char IMKV_MAGIC[4] = {0x49, 0x4d, 0x4b, 0x56};
constexpr const u8 IMEN_SIZE = 0x8c;

fs::path GetKvdbPath();
fs::path GetRyuSavePath(const u64 &program_id);

enum class IMENReadResult {
    Nonexistent,  // ryujinx not found
    NoHeader,     // file isn't big enough for header
    InvalidMagic, // no IMKV or IMEN header
    Misaligned,   // file isn't aligned to expected IMEN boundaries
    NoImens,      // no-op, there are no IMENs
    Success,      // :)
};

struct IMEN
{
    u64 title_id;
    u64 save_id;
};

static_assert(sizeof(IMEN) == 0x10, "IMEN has incorrect size.");

IMENReadResult ReadKvdb(const fs::path &path, std::vector<IMEN> &imens);

} // namespace Common::FS

src/common/fs/symlink.cpp (new file, 43 lines)
@@ -0,0 +1,43 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#include "symlink.h"

#ifdef _WIN32
#include <windows.h>
#include <fmt/format.h>
#endif

namespace fs = std::filesystem;

// The sole purpose of this file is to treat symlinks like symlinks on POSIX,
// or treat them as directory junctions on Windows.
// This is because, for some inexplicable reason, Microsoft has locked symbolic
// links behind a "security policy", whereas directory junctions--functionally identical
// for directories, by the way--are not. Why? I don't know.

namespace Common::FS {

bool CreateSymlink(const fs::path &from, const fs::path &to)
{
#ifdef _WIN32
    const std::string command = fmt::format("mklink /J {} {}", to.string(), from.string());
    return system(command.c_str()) == 0;
#else
    std::error_code ec;
    fs::create_directory_symlink(from, to, ec);
    return !ec;
#endif
}

bool IsSymlink(const fs::path &path)
{
#ifdef _WIN32
    auto attributes = GetFileAttributesW(path.wstring().c_str());
    return attributes & FILE_ATTRIBUTE_REPARSE_POINT;
#else
    return fs::is_symlink(path);
#endif
}

} // namespace Common::FS

src/common/fs/symlink.h (new file, 12 lines)
@@ -0,0 +1,12 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#pragma once

#include <filesystem>

namespace Common::FS {

bool CreateSymlink(const std::filesystem::path &from, const std::filesystem::path &to);
bool IsSymlink(const std::filesystem::path &path);

} // namespace Common::FS

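The comments in `symlink.cpp` explain why junctions stand in for symlinks on Windows. A hypothetical call site is sketched below; the function name and both paths are made up for illustration and do not appear in the Eden codebase.

```cpp
// Hypothetical example: link Eden's save directory into the legacy Ryujinx
// location so both emulators see the same save data.
#include "common/fs/symlink.h"

bool LinkSaveDirectory(const std::filesystem::path& eden_save_dir,
                       const std::filesystem::path& ryujinx_save_dir) {
    // On POSIX this creates a directory symlink; on Windows it creates a
    // directory junction instead, which needs no special privileges.
    if (Common::FS::IsSymlink(ryujinx_save_dir)) {
        return true; // already linked
    }
    return Common::FS::CreateSymlink(eden_save_dir, ryujinx_save_dir);
}
```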
@@ -6,10 +6,9 @@ add_library(dynarmic STATIC
|
||||
backend/block_range_information.cpp
|
||||
backend/block_range_information.h
|
||||
backend/exception_handler.h
|
||||
common/always_false.h
|
||||
common/assert.cpp
|
||||
common/assert.h
|
||||
common/cast_util.h
|
||||
common/type_util.h
|
||||
common/common_types.h
|
||||
common/crypto/aes.cpp
|
||||
common/crypto/aes.h
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
#include "dynarmic/backend/arm64/devirtualize.h"
|
||||
#include "dynarmic/backend/arm64/emit_arm64.h"
|
||||
#include "dynarmic/backend/arm64/stack_layout.h"
|
||||
#include "dynarmic/common/cast_util.h"
|
||||
#include "dynarmic/common/type_util.h"
|
||||
#include "dynarmic/common/fp/fpcr.h"
|
||||
#include "dynarmic/frontend/A32/a32_location_descriptor.h"
|
||||
#include "dynarmic/frontend/A32/translate/a32_translate.h"
|
||||
@@ -93,9 +93,9 @@ static void* EmitExclusiveReadCallTrampoline(oaknut::CodeGenerator& code, const
|
||||
|
||||
code.align(8);
|
||||
code.l(l_this);
|
||||
code.dx(mcl::bit_cast<u64>(&conf));
|
||||
code.dx(std::bit_cast<u64>(&conf));
|
||||
code.l(l_addr);
|
||||
code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
|
||||
return target;
|
||||
}
|
||||
@@ -151,9 +151,9 @@ static void* EmitExclusiveWriteCallTrampoline(oaknut::CodeGenerator& code, const
|
||||
|
||||
code.align(8);
|
||||
code.l(l_this);
|
||||
code.dx(mcl::bit_cast<u64>(&conf));
|
||||
code.dx(std::bit_cast<u64>(&conf));
|
||||
code.l(l_addr);
|
||||
code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
|
||||
return target;
|
||||
}
|
||||
@@ -219,7 +219,7 @@ void A32AddressSpace::EmitPrelude() {
|
||||
code.MOV(Xstate, X1);
|
||||
code.MOV(Xhalt, X2);
|
||||
if (conf.page_table) {
|
||||
code.MOV(Xpagetable, mcl::bit_cast<u64>(conf.page_table));
|
||||
code.MOV(Xpagetable, std::bit_cast<u64>(conf.page_table));
|
||||
}
|
||||
if (conf.fastmem_pointer) {
|
||||
code.MOV(Xfastmem, *conf.fastmem_pointer);
|
||||
@@ -258,7 +258,7 @@ void A32AddressSpace::EmitPrelude() {
|
||||
code.MOV(Xstate, X1);
|
||||
code.MOV(Xhalt, X2);
|
||||
if (conf.page_table) {
|
||||
code.MOV(Xpagetable, mcl::bit_cast<u64>(conf.page_table));
|
||||
code.MOV(Xpagetable, std::bit_cast<u64>(conf.page_table));
|
||||
}
|
||||
if (conf.fastmem_pointer) {
|
||||
code.MOV(Xfastmem, *conf.fastmem_pointer);
|
||||
@@ -317,9 +317,9 @@ void A32AddressSpace::EmitPrelude() {
|
||||
|
||||
code.align(8);
|
||||
code.l(l_this);
|
||||
code.dx(mcl::bit_cast<u64>(this));
|
||||
code.dx(std::bit_cast<u64>(this));
|
||||
code.l(l_addr);
|
||||
code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
}
|
||||
|
||||
prelude_info.return_from_run_code = code.xptr<void*>();
|
||||
@@ -347,7 +347,7 @@ void A32AddressSpace::EmitPrelude() {
|
||||
|
||||
code.align(8);
|
||||
code.l(l_return_to_dispatcher);
|
||||
code.dx(mcl::bit_cast<u64>(prelude_info.return_to_dispatcher));
|
||||
code.dx(std::bit_cast<u64>(prelude_info.return_to_dispatcher));
|
||||
|
||||
prelude_info.end_of_prelude = code.offset();
|
||||
|
||||
@@ -369,7 +369,7 @@ EmitConfig A32AddressSpace::GetEmitConfig() {
|
||||
|
||||
.check_halt_on_memory_access = conf.check_halt_on_memory_access,
|
||||
|
||||
.page_table_pointer = mcl::bit_cast<u64>(conf.page_table),
|
||||
.page_table_pointer = std::bit_cast<u64>(conf.page_table),
|
||||
.page_table_address_space_bits = 32,
|
||||
.page_table_pointer_mask_bits = conf.page_table_pointer_mask_bits,
|
||||
.silently_mirror_page_table = true,
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
#include "dynarmic/backend/arm64/devirtualize.h"
|
||||
#include "dynarmic/backend/arm64/emit_arm64.h"
|
||||
#include "dynarmic/backend/arm64/stack_layout.h"
|
||||
#include "dynarmic/common/cast_util.h"
|
||||
#include "dynarmic/common/type_util.h"
|
||||
#include "dynarmic/frontend/A64/a64_location_descriptor.h"
|
||||
#include "dynarmic/frontend/A64/translate/a64_translate.h"
|
||||
#include "dynarmic/interface/A64/config.h"
|
||||
@@ -92,9 +92,9 @@ static void* EmitExclusiveReadCallTrampoline(oaknut::CodeGenerator& code, const
|
||||
|
||||
code.align(8);
|
||||
code.l(l_this);
|
||||
code.dx(mcl::bit_cast<u64>(&conf));
|
||||
code.dx(std::bit_cast<u64>(&conf));
|
||||
code.l(l_addr);
|
||||
code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
|
||||
return target;
|
||||
}
|
||||
@@ -150,9 +150,9 @@ static void* EmitExclusiveWriteCallTrampoline(oaknut::CodeGenerator& code, const
|
||||
|
||||
code.align(8);
|
||||
code.l(l_this);
|
||||
code.dx(mcl::bit_cast<u64>(&conf));
|
||||
code.dx(std::bit_cast<u64>(&conf));
|
||||
code.l(l_addr);
|
||||
code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
|
||||
return target;
|
||||
}
|
||||
@@ -235,9 +235,9 @@ static void* EmitExclusiveRead128CallTrampoline(oaknut::CodeGenerator& code, con
|
||||
|
||||
code.align(8);
|
||||
code.l(l_this);
|
||||
code.dx(mcl::bit_cast<u64>(&conf));
|
||||
code.dx(std::bit_cast<u64>(&conf));
|
||||
code.l(l_addr);
|
||||
code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
|
||||
return target;
|
||||
}
|
||||
@@ -317,9 +317,9 @@ static void* EmitExclusiveWrite128CallTrampoline(oaknut::CodeGenerator& code, co
|
||||
|
||||
code.align(8);
|
||||
code.l(l_this);
|
||||
code.dx(mcl::bit_cast<u64>(&conf));
|
||||
code.dx(std::bit_cast<u64>(&conf));
|
||||
code.l(l_addr);
|
||||
code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
|
||||
return target;
|
||||
}
|
||||
@@ -396,7 +396,7 @@ void A64AddressSpace::EmitPrelude() {
|
||||
code.MOV(Xstate, X1);
|
||||
code.MOV(Xhalt, X2);
|
||||
if (conf.page_table) {
|
||||
code.MOV(Xpagetable, mcl::bit_cast<u64>(conf.page_table));
|
||||
code.MOV(Xpagetable, std::bit_cast<u64>(conf.page_table));
|
||||
}
|
||||
if (conf.fastmem_pointer) {
|
||||
code.MOV(Xfastmem, *conf.fastmem_pointer);
|
||||
@@ -434,7 +434,7 @@ void A64AddressSpace::EmitPrelude() {
|
||||
code.MOV(Xstate, X1);
|
||||
code.MOV(Xhalt, X2);
|
||||
if (conf.page_table) {
|
||||
code.MOV(Xpagetable, mcl::bit_cast<u64>(conf.page_table));
|
||||
code.MOV(Xpagetable, std::bit_cast<u64>(conf.page_table));
|
||||
}
|
||||
if (conf.fastmem_pointer) {
|
||||
code.MOV(Xfastmem, *conf.fastmem_pointer);
|
||||
@@ -492,9 +492,9 @@ void A64AddressSpace::EmitPrelude() {
|
||||
|
||||
code.align(8);
|
||||
code.l(l_this);
|
||||
code.dx(mcl::bit_cast<u64>(this));
|
||||
code.dx(std::bit_cast<u64>(this));
|
||||
code.l(l_addr);
|
||||
code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
|
||||
}
|
||||
|
||||
prelude_info.return_from_run_code = code.xptr<void*>();
|
||||
@@ -522,7 +522,7 @@ void A64AddressSpace::EmitPrelude() {
|
||||
|
||||
code.align(8);
|
||||
code.l(l_return_to_dispatcher);
|
||||
code.dx(mcl::bit_cast<u64>(prelude_info.return_to_dispatcher));
|
||||
code.dx(std::bit_cast<u64>(prelude_info.return_to_dispatcher));
|
||||
|
||||
prelude_info.end_of_prelude = code.offset();
|
||||
|
||||
@@ -544,7 +544,7 @@ EmitConfig A64AddressSpace::GetEmitConfig() {
|
||||
|
||||
.check_halt_on_memory_access = conf.check_halt_on_memory_access,
|
||||
|
||||
.page_table_pointer = mcl::bit_cast<u64>(conf.page_table),
|
||||
.page_table_pointer = std::bit_cast<u64>(conf.page_table),
|
||||
.page_table_address_space_bits = conf.page_table_address_space_bits,
|
||||
.page_table_pointer_mask_bits = conf.page_table_pointer_mask_bits,
|
||||
.silently_mirror_page_table = conf.silently_mirror_page_table,
|
||||
|
||||
@@ -17,7 +17,6 @@
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include <oaknut/oaknut.hpp>
|
||||
|
||||
#include "dynarmic/common/always_false.h"
|
||||
|
||||
namespace Dynarmic::Backend::Arm64 {
|
||||
|
||||
@@ -37,7 +36,8 @@ constexpr auto Rscratch0() {
|
||||
} else if constexpr (bitsize == 64) {
|
||||
return Xscratch0;
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<bitsize>>);
|
||||
// TODO: This codepath is regarded as "takeable" on gcc12
|
||||
return Xscratch0; //static_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -48,7 +48,8 @@ constexpr auto Rscratch1() {
|
||||
} else if constexpr (bitsize == 64) {
|
||||
return Xscratch1;
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<bitsize>>);
|
||||
// TODO: This codepath is regarded as "takeable" on gcc12
|
||||
return Xscratch1; //static_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,11 +1,14 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
/* This file is part of the dynarmic project.
|
||||
* Copyright (c) 2022 MerryMage
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <bit>
|
||||
#include <numeric>
|
||||
|
||||
#include "dynarmic/backend/arm64/a64_address_space.h"
|
||||
#include "dynarmic/backend/arm64/a64_jitstate.h"
|
||||
@@ -13,7 +16,7 @@
|
||||
#include "dynarmic/backend/arm64/devirtualize.h"
|
||||
#include "dynarmic/backend/arm64/emit_arm64.h"
|
||||
#include "dynarmic/backend/arm64/stack_layout.h"
|
||||
#include "dynarmic/common/cast_util.h"
|
||||
#include "dynarmic/common/type_util.h"
|
||||
#include "dynarmic/common/fp/fpcr.h"
|
||||
#include "dynarmic/common/llvm_disassemble.h"
|
||||
#include "dynarmic/interface/exclusive_monitor.h"
|
||||
@@ -99,7 +102,7 @@ void AddressSpace::ClearCache() {
|
||||
|
||||
void AddressSpace::DumpDisassembly() const {
|
||||
for (u32* ptr = mem.ptr(); ptr < code.xptr<u32*>(); ptr++) {
|
||||
std::printf("%s", Common::DisassembleAArch64(*ptr, mcl::bit_cast<u64>(ptr)).c_str());
|
||||
std::printf("%s", Common::DisassembleAArch64(*ptr, std::bit_cast<u64>(ptr)).c_str());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -316,7 +319,7 @@ void AddressSpace::RelinkForDescriptor(IR::LocationDescriptor target_descriptor,
|
||||
|
||||
FakeCall AddressSpace::FastmemCallback(u64 host_pc) {
|
||||
{
|
||||
const auto host_ptr = mcl::bit_cast<CodePtr>(host_pc);
|
||||
const auto host_ptr = std::bit_cast<CodePtr>(host_pc);
|
||||
|
||||
const auto entry_point = ReverseGetEntryPoint(host_ptr);
|
||||
if (!entry_point) {
|
||||
|
||||
@@ -8,7 +8,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <bit>
|
||||
#include <numeric>
|
||||
#include "dynarmic/common/common_types.h"
|
||||
#include <mcl/type_traits/function_info.hpp>
|
||||
|
||||
@@ -23,7 +24,7 @@ struct DevirtualizedCall {
|
||||
template<auto mfp>
|
||||
DevirtualizedCall DevirtualizeWindows(mcl::class_type<decltype(mfp)>* this_) {
|
||||
static_assert(sizeof(mfp) == 8);
|
||||
return DevirtualizedCall{mcl::bit_cast<u64>(mfp), reinterpret_cast<u64>(this_)};
|
||||
return DevirtualizedCall{std::bit_cast<u64>(mfp), reinterpret_cast<u64>(this_)};
|
||||
}
|
||||
|
||||
// https://github.com/ARM-software/abi-aa/blob/main/cppabi64/cppabi64.rst#representation-of-pointer-to-member-function
|
||||
@@ -34,16 +35,16 @@ DevirtualizedCall DevirtualizeDefault(mcl::class_type<decltype(mfp)>* this_) {
|
||||
u64 ptr;
|
||||
// LSB is discriminator for if function is virtual. Other bits are this adjustment.
|
||||
u64 adj;
|
||||
} mfp_struct = mcl::bit_cast<MemberFunctionPointer>(mfp);
|
||||
} mfp_struct = std::bit_cast<MemberFunctionPointer>(mfp);
|
||||
|
||||
static_assert(sizeof(MemberFunctionPointer) == 16);
|
||||
static_assert(sizeof(MemberFunctionPointer) == sizeof(mfp));
|
||||
|
||||
u64 fn_ptr = mfp_struct.ptr;
|
||||
u64 this_ptr = mcl::bit_cast<u64>(this_) + (mfp_struct.adj >> 1);
|
||||
u64 this_ptr = std::bit_cast<u64>(this_) + (mfp_struct.adj >> 1);
|
||||
if (mfp_struct.adj & 1) {
|
||||
u64 vtable = mcl::bit_cast_pointee<u64>(this_ptr);
|
||||
fn_ptr = mcl::bit_cast_pointee<u64>(vtable + fn_ptr);
|
||||
u64 vtable = std::bit_cast<u64>(this_ptr);
|
||||
fn_ptr = std::bit_cast<u64>(vtable + fn_ptr);
|
||||
}
|
||||
return DevirtualizedCall{fn_ptr, this_ptr};
|
||||
}
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
/* This file is part of the dynarmic project.
|
||||
* Copyright (c) 2022 MerryMage
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
*/
|
||||
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <bit>
|
||||
#include <numeric>
|
||||
#include <oaknut/oaknut.hpp>
|
||||
|
||||
#include "dynarmic/backend/arm64/a64_jitstate.h"
|
||||
@@ -495,7 +499,7 @@ template<>
|
||||
void EmitIR<IR::Opcode::A64GetTPIDR>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
auto Xvalue = ctx.reg_alloc.WriteX(inst);
|
||||
RegAlloc::Realize(Xvalue);
|
||||
code.MOV(Xscratch0, mcl::bit_cast<u64>(ctx.conf.tpidr_el0));
|
||||
code.MOV(Xscratch0, std::bit_cast<u64>(ctx.conf.tpidr_el0));
|
||||
code.LDR(Xvalue, Xscratch0);
|
||||
}
|
||||
|
||||
@@ -503,7 +507,7 @@ template<>
|
||||
void EmitIR<IR::Opcode::A64GetTPIDRRO>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
auto Xvalue = ctx.reg_alloc.WriteX(inst);
|
||||
RegAlloc::Realize(Xvalue);
|
||||
code.MOV(Xscratch0, mcl::bit_cast<u64>(ctx.conf.tpidrro_el0));
|
||||
code.MOV(Xscratch0, std::bit_cast<u64>(ctx.conf.tpidrro_el0));
|
||||
code.LDR(Xvalue, Xscratch0);
|
||||
}
|
||||
|
||||
@@ -512,7 +516,7 @@ void EmitIR<IR::Opcode::A64SetTPIDR>(oaknut::CodeGenerator& code, EmitContext& c
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
auto Xvalue = ctx.reg_alloc.ReadX(args[0]);
|
||||
RegAlloc::Realize(Xvalue);
|
||||
code.MOV(Xscratch0, mcl::bit_cast<u64>(ctx.conf.tpidr_el0));
|
||||
code.MOV(Xscratch0, std::bit_cast<u64>(ctx.conf.tpidr_el0));
|
||||
code.STR(Xvalue, Xscratch0);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
/* This file is part of the dynarmic project.
|
||||
* Copyright (c) 2022 MerryMage
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
@@ -7,8 +10,8 @@
|
||||
|
||||
#include <optional>
|
||||
#include <utility>
|
||||
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <bit>
|
||||
#include <numeric>
|
||||
#include <oaknut/oaknut.hpp>
|
||||
|
||||
#include "dynarmic/backend/arm64/abi.h"
|
||||
@@ -548,7 +551,7 @@ void FastmemEmitReadMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::In
|
||||
FastmemPatchInfo{
|
||||
.marker = marker,
|
||||
.fc = FakeCall{
|
||||
.call_pc = mcl::bit_cast<u64>(code.xptr<void*>()),
|
||||
.call_pc = std::bit_cast<u64>(code.xptr<void*>()),
|
||||
},
|
||||
.recompile = ctx.conf.recompile_on_fastmem_failure,
|
||||
});
|
||||
@@ -598,7 +601,7 @@ void FastmemEmitWriteMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::I
|
||||
FastmemPatchInfo{
|
||||
.marker = marker,
|
||||
.fc = FakeCall{
|
||||
.call_pc = mcl::bit_cast<u64>(code.xptr<void*>()),
|
||||
.call_pc = std::bit_cast<u64>(code.xptr<void*>()),
|
||||
},
|
||||
.recompile = ctx.conf.recompile_on_fastmem_failure,
|
||||
});
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
/* This file is part of the dynarmic project.
|
||||
* Copyright (c) 2022 MerryMage
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
@@ -12,7 +15,6 @@
|
||||
#include "dynarmic/backend/arm64/emit_context.h"
|
||||
#include "dynarmic/backend/arm64/fpsr_manager.h"
|
||||
#include "dynarmic/backend/arm64/reg_alloc.h"
|
||||
#include "dynarmic/common/always_false.h"
|
||||
#include "dynarmic/ir/basic_block.h"
|
||||
#include "dynarmic/ir/microinstruction.h"
|
||||
#include "dynarmic/ir/opcodes.h"
|
||||
@@ -43,7 +45,7 @@ static void EmitTwoOpArranged(oaknut::CodeGenerator& code, EmitContext& ctx, IR:
|
||||
} else if constexpr (size == 64) {
|
||||
emit(Qresult->D2(), Qoperand->D2());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -66,7 +68,7 @@ static void EmitTwoOpArrangedWiden(oaknut::CodeGenerator& code, EmitContext& ctx
|
||||
} else if constexpr (size == 32) {
|
||||
emit(Qresult->D2(), Qoperand->toD().S2());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -81,7 +83,7 @@ static void EmitTwoOpArrangedNarrow(oaknut::CodeGenerator& code, EmitContext& ct
|
||||
} else if constexpr (size == 64) {
|
||||
emit(Qresult->toD().S2(), Qoperand->D2());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -104,7 +106,7 @@ static void EmitTwoOpArrangedPairWiden(oaknut::CodeGenerator& code, EmitContext&
|
||||
} else if constexpr (size == 32) {
|
||||
emit(Qresult->D2(), Qoperand->S4());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -119,7 +121,7 @@ static void EmitTwoOpArrangedLower(oaknut::CodeGenerator& code, EmitContext& ctx
|
||||
} else if constexpr (size == 32) {
|
||||
emit(Qresult->toD().S2(), Qoperand->toD().S2());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -147,7 +149,7 @@ static void EmitThreeOpArranged(oaknut::CodeGenerator& code, EmitContext& ctx, I
|
||||
} else if constexpr (size == 64) {
|
||||
emit(Qresult->D2(), Qa->D2(), Qb->D2());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -172,7 +174,7 @@ static void EmitThreeOpArrangedWiden(oaknut::CodeGenerator& code, EmitContext& c
|
||||
} else if constexpr (size == 64) {
|
||||
emit(Qresult->Q1(), Qa->toD().D1(), Qb->toD().D1());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -195,7 +197,7 @@ static void EmitThreeOpArrangedLower(oaknut::CodeGenerator& code, EmitContext& c
|
||||
} else if constexpr (size == 32) {
|
||||
emit(Qresult->toD().S2(), Qa->toD().S2(), Qb->toD().S2());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -217,7 +219,7 @@ static void EmitSaturatedAccumulate(oaknut::CodeGenerator&, EmitContext& ctx, IR
|
||||
} else if constexpr (size == 64) {
|
||||
emit(Qaccumulator->D2(), Qoperand->D2());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -238,7 +240,7 @@ static void EmitImmShift(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* ins
|
||||
} else if constexpr (size == 64) {
|
||||
emit(Qresult->D2(), Qoperand->D2(), shift_amount);
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -266,7 +268,7 @@ static void EmitReduce(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* inst,
|
||||
} else if constexpr (size == 64) {
|
||||
emit(Vresult, Qoperand->D2());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,8 @@
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
*/
|
||||
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <bit>
|
||||
#include <numeric>
|
||||
#include <mcl/mp/metavalue/lift_value.hpp>
|
||||
#include <mcl/mp/typelist/cartesian_product.hpp>
|
||||
#include <mcl/mp/typelist/get.hpp>
|
||||
@@ -14,7 +15,6 @@
|
||||
#include <mcl/mp/typelist/list.hpp>
|
||||
#include <mcl/mp/typelist/lower_to_tuple.hpp>
|
||||
#include <mcl/type_traits/function_info.hpp>
|
||||
#include <mcl/type_traits/integer_of_size.hpp>
|
||||
#include <oaknut/oaknut.hpp>
|
||||
|
||||
#include "dynarmic/backend/arm64/a32_jitstate.h"
|
||||
@@ -24,8 +24,7 @@
|
||||
#include "dynarmic/backend/arm64/emit_context.h"
|
||||
#include "dynarmic/backend/arm64/fpsr_manager.h"
|
||||
#include "dynarmic/backend/arm64/reg_alloc.h"
|
||||
#include "dynarmic/common/always_false.h"
|
||||
#include "dynarmic/common/cast_util.h"
|
||||
#include "dynarmic/common/type_util.h"
|
||||
#include "dynarmic/common/fp/fpcr.h"
|
||||
#include "dynarmic/common/fp/fpsr.h"
|
||||
#include "dynarmic/common/fp/info.h"
|
||||
@@ -84,7 +83,7 @@ static void EmitTwoOpArranged(oaknut::CodeGenerator& code, EmitContext& ctx, IR:
|
||||
} else if constexpr (size == 64) {
|
||||
emit(Qresult->D2(), Qa->D2());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -112,7 +111,7 @@ static void EmitThreeOpArranged(oaknut::CodeGenerator& code, EmitContext& ctx, I
|
||||
} else if constexpr (size == 64) {
|
||||
emit(Qresult->D2(), Qa->D2(), Qb->D2());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -135,7 +134,7 @@ static void EmitFMA(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* ins
|
||||
} else if constexpr (size == 64) {
|
||||
emit(Qresult->D2(), Qm->D2(), Qn->D2());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -157,7 +156,7 @@ static void EmitFromFixed(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Ins
|
||||
} else if constexpr (size == 64) {
|
||||
emit(Qto->D2(), Qfrom->D2(), fbits);
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -179,7 +178,7 @@ void EmitToFixed(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst)
|
||||
} else if constexpr (fsize == 64) {
|
||||
return Qto->D2();
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<fsize>>);
|
||||
return Qto->D2(); //static_assert(false);
|
||||
}
|
||||
}();
|
||||
auto Vfrom = [&] {
|
||||
@@ -188,7 +187,7 @@ void EmitToFixed(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst)
|
||||
} else if constexpr (fsize == 64) {
|
||||
return Qfrom->D2();
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<fsize>>);
|
||||
return Qfrom->D2(); //static_assert(false);
|
||||
}
|
||||
}();
|
||||
|
||||
@@ -271,7 +270,7 @@ static void EmitTwoOpFallbackWithoutRegAlloc(oaknut::CodeGenerator& code, EmitCo
|
||||
|
||||
ABI_PushRegisters(code, ABI_CALLER_SAVE & ~(1ull << Qresult.index()), stack_size);
|
||||
|
||||
code.MOV(Xscratch0, mcl::bit_cast<u64>(fn));
|
||||
code.MOV(Xscratch0, std::bit_cast<u64>(fn));
|
||||
code.ADD(X0, SP, 0 * 16);
|
||||
code.ADD(X1, SP, 1 * 16);
|
||||
code.MOV(X2, fpcr);
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
/* This file is part of the dynarmic project.
|
||||
* Copyright (c) 2022 MerryMage
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
@@ -12,7 +15,6 @@
|
||||
#include "dynarmic/backend/arm64/emit_context.h"
|
||||
#include "dynarmic/backend/arm64/fpsr_manager.h"
|
||||
#include "dynarmic/backend/arm64/reg_alloc.h"
|
||||
#include "dynarmic/common/always_false.h"
|
||||
#include "dynarmic/ir/basic_block.h"
|
||||
#include "dynarmic/ir/microinstruction.h"
|
||||
#include "dynarmic/ir/opcodes.h"
|
||||
@@ -39,7 +41,7 @@ static void Emit(oaknut::CodeGenerator&, EmitContext& ctx, IR::Inst* inst, EmitF
|
||||
} else if constexpr (size == 64) {
|
||||
emit(Qresult->D2(), Qa->D2(), Qb->D2());
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<size>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -11,10 +11,10 @@
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <iterator>
|
||||
|
||||
#include <bit>
|
||||
#include <numeric>
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include <mcl/bit/bit_field.hpp>
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <mcl/mp/metavalue/lift_value.hpp>
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
#include "dynarmic/backend/arm64/emit_context.h"
|
||||
#include "dynarmic/backend/arm64/fpsr_manager.h"
|
||||
#include "dynarmic/backend/arm64/verbose_debugging_output.h"
|
||||
#include "dynarmic/common/always_false.h"
|
||||
|
||||
namespace Dynarmic::Backend::Arm64 {
|
||||
|
||||
@@ -246,7 +245,7 @@ void RegAlloc::AssertNoMoreUses() const {
|
||||
}
|
||||
|
||||
void RegAlloc::EmitVerboseDebuggingOutput() {
|
||||
code.MOV(X19, mcl::bit_cast<u64>(&PrintVerboseDebuggingOutputLine)); // Non-volatile register
|
||||
code.MOV(X19, std::bit_cast<u64>(&PrintVerboseDebuggingOutputLine)); // Non-volatile register
|
||||
|
||||
const auto do_location = [&](HostLocInfo& info, HostLocType type, size_t index) {
|
||||
using namespace oaknut::util;
|
||||
@@ -301,7 +300,7 @@ int RegAlloc::GenerateImmediate(const IR::Value& value) {
|
||||
|
||||
return 0;
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<kind>>);
|
||||
return 0;//static_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -370,7 +369,7 @@ int RegAlloc::RealizeReadImpl(const IR::Value& value) {
|
||||
} else if constexpr (required_kind == HostLoc::Kind::Flags) {
|
||||
ASSERT_FALSE("A simple read from flags is likely a logic error.");
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<required_kind>>);
|
||||
return 0;//static_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -395,7 +394,7 @@ int RegAlloc::RealizeWriteImpl(const IR::Inst* value) {
|
||||
flags.SetupLocation(value);
|
||||
return 0;
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<kind>>);
|
||||
return 0; //static_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -416,7 +415,7 @@ int RegAlloc::RealizeReadWriteImpl(const IR::Value& read_value, const IR::Inst*
|
||||
} else if constexpr (kind == HostLoc::Kind::Flags) {
|
||||
ASSERT_FALSE("Incorrect function for ReadWrite of flags");
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<kind>>);
|
||||
return write_loc; //static_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -11,19 +11,17 @@
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
|
||||
#include <mcl/macro/architecture.hpp>
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
||||
#if defined(MCL_ARCHITECTURE_X86_64)
|
||||
#if defined(ARCHITECTURE_x86_64)
|
||||
namespace Dynarmic::Backend::X64 {
|
||||
class BlockOfCode;
|
||||
} // namespace Dynarmic::Backend::X64
|
||||
#elif defined(MCL_ARCHITECTURE_ARM64)
|
||||
#elif defined(ARCHITECTURE_arm64)
|
||||
namespace oaknut {
|
||||
class CodeBlock;
|
||||
} // namespace oaknut
|
||||
#elif defined(MCL_ARCHITECTURE_RISCV)
|
||||
#elif defined(ARCHITECTURE_riscv64)
|
||||
namespace Dynarmic::Backend::RV64 {
|
||||
class CodeBlock;
|
||||
} // namespace Dynarmic::Backend::RV64
|
||||
@@ -33,16 +31,16 @@ class CodeBlock;
|
||||
|
||||
namespace Dynarmic::Backend {
|
||||
|
||||
#if defined(MCL_ARCHITECTURE_X86_64)
|
||||
#if defined(ARCHITECTURE_x86_64)
|
||||
struct FakeCall {
|
||||
u64 call_rip;
|
||||
u64 ret_rip;
|
||||
};
|
||||
#elif defined(MCL_ARCHITECTURE_ARM64)
|
||||
#elif defined(ARCHITECTURE_arm64)
|
||||
struct FakeCall {
|
||||
u64 call_pc;
|
||||
};
|
||||
#elif defined(MCL_ARCHITECTURE_RISCV)
|
||||
#elif defined(ARCHITECTURE_riscv64)
|
||||
struct FakeCall {
|
||||
};
|
||||
#else
|
||||
@@ -54,11 +52,11 @@ public:
|
||||
ExceptionHandler();
|
||||
~ExceptionHandler();
|
||||
|
||||
#if defined(MCL_ARCHITECTURE_X86_64)
|
||||
#if defined(ARCHITECTURE_x86_64)
|
||||
void Register(X64::BlockOfCode& code);
|
||||
#elif defined(MCL_ARCHITECTURE_ARM64)
|
||||
#elif defined(ARCHITECTURE_arm64)
|
||||
void Register(oaknut::CodeBlock& mem, std::size_t mem_size);
|
||||
#elif defined(MCL_ARCHITECTURE_RISCV)
|
||||
#elif defined(ARCHITECTURE_riscv64)
|
||||
void Register(RV64::CodeBlock& mem, std::size_t mem_size);
|
||||
#else
|
||||
# error "Invalid architecture"
|
||||
|
||||
@@ -19,8 +19,7 @@
|
||||
|
||||
#include <fmt/format.h>
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <mcl/macro/architecture.hpp>
|
||||
#include <numeric>
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
||||
#include "dynarmic/backend/exception_handler.h"
|
||||
@@ -146,7 +145,7 @@ kern_return_t MachHandler::HandleRequest(x86_thread_state64_t* ts) {
|
||||
FakeCall fc = iter->cb(ts->__rip);
|
||||
|
||||
ts->__rsp -= sizeof(u64);
|
||||
*mcl::bit_cast<u64*>(ts->__rsp) = fc.ret_rip;
|
||||
*std::bit_cast<u64*>(ts->__rsp) = fc.ret_rip;
|
||||
ts->__rip = fc.call_rip;
|
||||
|
||||
return KERN_SUCCESS;
|
||||
@@ -271,13 +270,13 @@ ExceptionHandler::~ExceptionHandler() = default;
|
||||
|
||||
#if defined(ARCHITECTURE_x86_64)
|
||||
void ExceptionHandler::Register(X64::BlockOfCode& code) {
|
||||
const u64 code_begin = mcl::bit_cast<u64>(code.getCode());
|
||||
const u64 code_begin = std::bit_cast<u64>(code.getCode());
|
||||
const u64 code_end = code_begin + code.GetTotalCodeSize();
|
||||
impl = std::make_unique<Impl>(code_begin, code_end);
|
||||
}
|
||||
#elif defined(ARCHITECTURE_arm64)
|
||||
void ExceptionHandler::Register(oaknut::CodeBlock& mem, std::size_t size) {
|
||||
const u64 code_begin = mcl::bit_cast<u64>(mem.ptr());
|
||||
const u64 code_begin = std::bit_cast<u64>(mem.ptr());
|
||||
const u64 code_end = code_begin + size;
|
||||
impl = std::make_unique<Impl>(code_begin, code_end);
|
||||
}
|
||||
|
||||
@@ -3,8 +3,6 @@
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
*/
|
||||
|
||||
#include <mcl/macro/architecture.hpp>
|
||||
|
||||
#if defined(ARCHITECTURE_x86_64)
|
||||
# include "dynarmic/backend/x64/mig/mach_exc_server.c"
|
||||
#elif defined(ARCHITECTURE_arm64)
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
#else
|
||||
# error "Invalid architecture"
|
||||
#endif
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <numeric>
|
||||
|
||||
namespace Dynarmic::Backend {
|
||||
|
||||
@@ -122,7 +122,7 @@ void SigHandler::SigAction(int sig, siginfo_t* info, void* raw_context) {
|
||||
if (auto const iter = sig_handler->FindCodeBlockInfo(CTX_RIP); iter != sig_handler->code_block_infos.end()) {
|
||||
FakeCall fc = iter->second.cb(CTX_RIP);
|
||||
CTX_RSP -= sizeof(u64);
|
||||
*mcl::bit_cast<u64*>(CTX_RSP) = fc.ret_rip;
|
||||
*std::bit_cast<u64*>(CTX_RSP) = fc.ret_rip;
|
||||
CTX_RIP = fc.call_rip;
|
||||
return;
|
||||
}
|
||||
@@ -187,17 +187,17 @@ private:
|
||||
ExceptionHandler::ExceptionHandler() = default;
|
||||
ExceptionHandler::~ExceptionHandler() = default;
|
||||
|
||||
#if defined(MCL_ARCHITECTURE_X86_64)
|
||||
#if defined(ARCHITECTURE_x86_64)
|
||||
void ExceptionHandler::Register(X64::BlockOfCode& code) {
|
||||
impl = std::make_unique<Impl>(mcl::bit_cast<u64>(code.getCode()), code.GetTotalCodeSize());
|
||||
impl = std::make_unique<Impl>(std::bit_cast<u64>(code.getCode()), code.GetTotalCodeSize());
|
||||
}
|
||||
#elif defined(MCL_ARCHITECTURE_ARM64)
|
||||
#elif defined(ARCHITECTURE_arm64)
|
||||
void ExceptionHandler::Register(oaknut::CodeBlock& mem, std::size_t size) {
|
||||
impl = std::make_unique<Impl>(mcl::bit_cast<u64>(mem.ptr()), size);
|
||||
impl = std::make_unique<Impl>(std::bit_cast<u64>(mem.ptr()), size);
|
||||
}
|
||||
#elif defined(MCL_ARCHITECTURE_RISCV)
|
||||
#elif defined(ARCHITECTURE_riscv64)
|
||||
void ExceptionHandler::Register(RV64::CodeBlock& mem, std::size_t size) {
|
||||
impl = std::make_unique<Impl>(mcl::bit_cast<u64>(mem.ptr<u64>()), size);
|
||||
impl = std::make_unique<Impl>(std::bit_cast<u64>(mem.ptr<u64>()), size);
|
||||
}
|
||||
#else
|
||||
# error "Invalid architecture"
|
||||
|
||||
@@ -6,8 +6,6 @@
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
*/
|
||||
|
||||
#include <mcl/macro/architecture.hpp>
|
||||
|
||||
#if defined(ARCHITECTURE_x86_64)
|
||||
# include "dynarmic/backend/x64/exception_handler_windows.cpp"
|
||||
#elif defined(ARCHITECTURE_arm64)
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
#include <mcl/mp/metavalue/lift_value.hpp>
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
||||
#include "dynarmic/common/always_false.h"
|
||||
|
||||
namespace Dynarmic::Backend::RV64 {
|
||||
|
||||
@@ -164,7 +163,7 @@ u32 RegAlloc::GenerateImmediate(const IR::Value& value) {
|
||||
} else if constexpr (kind == HostLoc::Kind::Fpr) {
|
||||
UNIMPLEMENTED();
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<kind>>);
|
||||
//static_assert(false);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -227,7 +226,7 @@ u32 RegAlloc::RealizeReadImpl(const IR::Value& value) {
|
||||
fprs[new_location_index].realized = true;
|
||||
return new_location_index;
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<required_kind>>);
|
||||
return 0; //static_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -254,7 +253,7 @@ u32 RegAlloc::RealizeWriteImpl(const IR::Inst* value) {
|
||||
setup_location(fprs[new_location_index]);
|
||||
return new_location_index;
|
||||
} else {
|
||||
static_assert(Common::always_false_v<mcl::mp::lift_value<required_kind>>);
|
||||
return 0;//static_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
/* This file is part of the dynarmic project.
|
||||
* Copyright (c) 2022 MerryMage
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
@@ -10,7 +13,6 @@
|
||||
|
||||
#include <fmt/format.h>
|
||||
#include <fmt/ostream.h>
|
||||
#include <mcl/type_traits/integer_of_size.hpp>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/backend/x64/a32_emit_x64.h"
|
||||
|
||||
@@ -9,11 +9,12 @@
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <bit>
|
||||
|
||||
#include <boost/icl/interval_set.hpp>
|
||||
#include <fmt/format.h>
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <numeric>
|
||||
#include <mcl/scope_exit.hpp>
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
||||
@@ -47,7 +48,7 @@ static RunCodeCallbacks GenRunCodeCallbacks(A32::UserCallbacks* cb, CodePtr (*Lo
|
||||
static std::function<void(BlockOfCode&)> GenRCP(const A32::UserConfig& conf) {
|
||||
return [conf](BlockOfCode& code) {
|
||||
if (conf.page_table) {
|
||||
code.mov(code.r14, mcl::bit_cast<u64>(conf.page_table));
|
||||
code.mov(code.r14, std::bit_cast<u64>(conf.page_table));
|
||||
}
|
||||
if (conf.fastmem_pointer) {
|
||||
code.mov(code.r13, *conf.fastmem_pointer);
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include <mcl/scope_exit.hpp>
|
||||
#include "dynarmic/common/common_types.h"
|
||||
#include <mcl/type_traits/integer_of_size.hpp>
|
||||
#include <boost/container/static_vector.hpp>
|
||||
|
||||
#include "dynarmic/backend/x64/a64_jitstate.h"
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
/* This file is part of the dynarmic project.
|
||||
* Copyright (c) 2022 MerryMage
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
@@ -10,7 +13,6 @@
|
||||
|
||||
#include <fmt/format.h>
|
||||
#include <fmt/ostream.h>
|
||||
#include <mcl/type_traits/integer_of_size.hpp>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/backend/x64/a64_emit_x64.h"
|
||||
|
||||
@@ -9,10 +9,11 @@
|
||||
#include <cstring>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <bit>
|
||||
|
||||
#include <boost/icl/interval_set.hpp>
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <numeric>
|
||||
#include <mcl/scope_exit.hpp>
|
||||
|
||||
#include "dynarmic/backend/x64/a64_emit_x64.h"
|
||||
@@ -43,7 +44,7 @@ static RunCodeCallbacks GenRunCodeCallbacks(A64::UserCallbacks* cb, CodePtr (*Lo
|
||||
static std::function<void(BlockOfCode&)> GenRCP(const A64::UserConfig& conf) {
|
||||
return [conf](BlockOfCode& code) {
|
||||
if (conf.page_table) {
|
||||
code.mov(code.r14, mcl::bit_cast<u64>(conf.page_table));
|
||||
code.mov(code.r14, std::bit_cast<u64>(conf.page_table));
|
||||
}
|
||||
if (conf.fastmem_pointer) {
|
||||
code.mov(code.r13, *conf.fastmem_pointer);
|
||||
|
||||
@@ -23,7 +23,7 @@
#include "dynarmic/backend/x64/constant_pool.h"
#include "dynarmic/backend/x64/host_feature.h"
#include "dynarmic/backend/x64/jitstate_info.h"
#include "dynarmic/common/cast_util.h"
#include "dynarmic/common/type_util.h"
#include "dynarmic/interface/halt_reason.h"
#include "dynarmic/ir/cond.h"

@@ -10,8 +10,8 @@

#include <cstring>
#include <utility>

#include <mcl/bit_cast.hpp>
#include <bit>
#include <numeric>
#include "dynarmic/common/common_types.h"
#include <mcl/type_traits/function_info.hpp>

@@ -42,7 +42,7 @@ ArgCallback DevirtualizeGeneric(mcl::class_type<decltype(mfp)>* this_) {
template<auto mfp>
ArgCallback DevirtualizeWindows(mcl::class_type<decltype(mfp)>* this_) {
static_assert(sizeof(mfp) == 8);
return ArgCallback{mcl::bit_cast<u64>(mfp), reinterpret_cast<u64>(this_)};
return ArgCallback{std::bit_cast<u64>(mfp), reinterpret_cast<u64>(this_)};
}

template<auto mfp>
@@ -53,7 +53,7 @@ ArgCallback DevirtualizeItanium(mcl::class_type<decltype(mfp)>* this_) {
u64 ptr;
/// The required adjustment to `this`, prior to the call.
u64 adj;
} mfp_struct = mcl::bit_cast<MemberFunctionPointer>(mfp);
} mfp_struct = std::bit_cast<MemberFunctionPointer>(mfp);

static_assert(sizeof(MemberFunctionPointer) == 16);
static_assert(sizeof(MemberFunctionPointer) == sizeof(mfp));
@@ -61,8 +61,8 @@ ArgCallback DevirtualizeItanium(mcl::class_type<decltype(mfp)>* this_) {
u64 fn_ptr = mfp_struct.ptr;
u64 this_ptr = reinterpret_cast<u64>(this_) + mfp_struct.adj;
if (mfp_struct.ptr & 1) {
u64 vtable = mcl::bit_cast_pointee<u64>(this_ptr);
fn_ptr = mcl::bit_cast_pointee<u64>(vtable + fn_ptr - 1);
u64 vtable = std::bit_cast<u64>(this_ptr);
fn_ptr = std::bit_cast<u64>(vtable + fn_ptr - 1);
}
return ArgCallback{fn_ptr, this_ptr};
}
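The Itanium path decodes the two-word member-function-pointer representation by hand before handing it to generated code. A hedged sketch of the same decoding in portable C++, assuming the Itanium C++ ABI layout (the struct and function names below are illustrative):

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>

struct ItaniumMemberFnPtr {
    std::uint64_t ptr;  // Function address, or vtable offset + 1 for virtual calls.
    std::uint64_t adj;  // Adjustment applied to `this` before the call.
};

template <typename MFP>
ItaniumMemberFnPtr Decode(MFP mfp) {
    static_assert(sizeof(MFP) == sizeof(ItaniumMemberFnPtr));
    return std::bit_cast<ItaniumMemberFnPtr>(mfp);
}

struct Callbacks {
    virtual ~Callbacks() = default;
    virtual void MemoryWrite(std::uint64_t) {}
};

int main() {
    const auto decoded = Decode(&Callbacks::MemoryWrite);
    // A set low bit marks a virtual call whose entry point lives in the
    // object's vtable at offset ptr - 1, which is what the emitted code walks.
    std::printf("virtual: %d, this-adjustment: %llu\n",
                static_cast<int>(decoded.ptr & 1),
                static_cast<unsigned long long>(decoded.adj));
}
```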
@@ -18,14 +18,13 @@
#include <mcl/mp/typelist/list.hpp>
#include <mcl/mp/typelist/lower_to_tuple.hpp>
#include "dynarmic/common/common_types.h"
#include <mcl/type_traits/integer_of_size.hpp>
#include <xbyak/xbyak.h>

#include "dynarmic/backend/x64/abi.h"
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/constants.h"
#include "dynarmic/backend/x64/emit_x64.h"
#include "dynarmic/common/cast_util.h"
#include "dynarmic/common/type_util.h"
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/fp/info.h"
@@ -36,22 +35,8 @@
#include "dynarmic/ir/basic_block.h"
#include "dynarmic/ir/microinstruction.h"

#define FCODE(NAME) \
[&code](auto... args) { \
if constexpr (fsize == 32) { \
code.NAME##s(args...); \
} else { \
code.NAME##d(args...); \
} \
}
#define ICODE(NAME) \
[&code](auto... args) { \
if constexpr (fsize == 32) { \
code.NAME##d(args...); \
} else { \
code.NAME##q(args...); \
} \
}
#define FCODE(NAME) [&](auto... args) { if (fsize == 32) code.NAME##s(args...); else code.NAME##d(args...); }
#define ICODE(NAME) [&](auto... args) { if (fsize == 32) code.NAME##d(args...); else code.NAME##q(args...); }

namespace Dynarmic::Backend::X64 {
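The rewritten `FCODE`/`ICODE` helpers shrink the old multi-line macros to one-liners that pick the single- or double-width form of an xbyak instruction from `fsize`. A toy illustration of the token-pasting dispatch (the `ToyEmitter` type is purely illustrative; the real `code` object is a `BlockOfCode`):

```cpp
#include <cstdio>

// Stand-in for BlockOfCode: the real methods emit x86-64, these just report
// which overload the macro selected.
struct ToyEmitter {
    void addss(int, int) { std::puts("addss (32-bit scalar)"); }
    void addsd(int, int) { std::puts("addsd (64-bit scalar)"); }
};

#define FCODE(NAME) [&](auto... args) { if (fsize == 32) code.NAME##s(args...); else code.NAME##d(args...); }

int main() {
    ToyEmitter code;
    const int fsize = 32;
    FCODE(adds)(0, 1);  // the fsize == 32 branch calls code.addss(0, 1)
}
```

Note that the new macros use a plain `if` rather than `if constexpr`, so both instruction forms must compile for every `fsize` the surrounding template is instantiated with.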
@@ -105,7 +90,7 @@ void ForceDenormalsToZero(BlockOfCode& code, std::initializer_list<Xbyak::Xmm> t
for (const Xbyak::Xmm& xmm : to_daz) {
code.movaps(xmm0, code.Const(xword, fsize == 32 ? f32_non_sign_mask : f64_non_sign_mask));
code.andps(xmm0, xmm);
if constexpr (fsize == 32) {
if (fsize == 32) {
code.pcmpgtd(xmm0, code.Const(xword, f32_smallest_normal - 1));
} else if (code.HasHostFeature(HostFeature::SSE42)) {
code.pcmpgtq(xmm0, code.Const(xword, f64_smallest_normal - 1));
@@ -120,13 +105,11 @@ void ForceDenormalsToZero(BlockOfCode& code, std::initializer_list<Xbyak::Xmm> t

template<size_t fsize>
void DenormalsAreZero(BlockOfCode& code, EmitContext& ctx, std::initializer_list<Xbyak::Xmm> to_daz) {
if (ctx.FPCR().FZ()) {
if (ctx.FPCR().FZ())
ForceDenormalsToZero<fsize>(code, to_daz);
}
}

template<size_t fsize>
void ZeroIfNaN(BlockOfCode& code, Xbyak::Xmm xmm_value, Xbyak::Xmm xmm_scratch) {
void ZeroIfNaN(BlockOfCode& code, Xbyak::Xmm xmm_value, Xbyak::Xmm xmm_scratch, size_t fsize) {
if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
constexpr u32 nan_to_zero = FixupLUT(FpFixup::PosZero,
FpFixup::PosZero);
@@ -141,8 +124,7 @@ void ZeroIfNaN(BlockOfCode& code, Xbyak::Xmm xmm_value, Xbyak::Xmm xmm_scratch)
}
}

template<size_t fsize>
void ForceToDefaultNaN(BlockOfCode& code, Xbyak::Xmm result) {
void ForceToDefaultNaN(BlockOfCode& code, Xbyak::Xmm result, size_t fsize) {
if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
const Xbyak::Opmask nan_mask = k1;
FCODE(vfpclasss)(nan_mask, result, u8(FpClass::QNaN | FpClass::SNaN));
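Several scalar helpers here (`ZeroIfNaN`, `ForceToDefaultNaN`) drop their `fsize` template parameter in favour of an ordinary function argument, and the matching `if constexpr` branches become plain `if`s. A minimal sketch of that transformation, using invented `Describe*` helpers purely for illustration:

```cpp
#include <cstddef>
#include <cstdio>

// Before: one instantiation per width, branch folded at compile time.
template <std::size_t fsize>
void DescribeTemplated() {
    if constexpr (fsize == 32)
        std::puts("single precision");
    else
        std::puts("double precision");
}

// After: a single function, branch taken at run time.
void DescribeRuntime(std::size_t fsize) {
    if (fsize == 32)
        std::puts("single precision");
    else
        std::puts("double precision");
}

int main() {
    DescribeTemplated<64>();
    DescribeRuntime(64);
}
```

The trade-off is fewer template instantiations (less duplicated emitter code) against a runtime branch, which is cheap in a function that only runs while JIT-compiling a block.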
@@ -208,7 +190,7 @@ void PostProcessNaN(BlockOfCode& code, Xbyak::Xmm result, Xbyak::Xmm tmp) {
|
||||
// We allow for the case where op1 and result are the same register. We do not read from op1 once result is written to.
|
||||
template<size_t fsize>
|
||||
void EmitPostProcessNaNs(BlockOfCode& code, Xbyak::Xmm result, Xbyak::Xmm op1, Xbyak::Xmm op2, Xbyak::Reg64 tmp, Xbyak::Label end) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
constexpr FPT exponent_mask = FP::FPInfo<FPT>::exponent_mask;
|
||||
constexpr FPT mantissa_msb = FP::FPInfo<FPT>::mantissa_msb;
|
||||
constexpr u8 mantissa_msb_bit = static_cast<u8>(FP::FPInfo<FPT>::explicit_mantissa_width - 1);
|
||||
@@ -236,7 +218,7 @@ void EmitPostProcessNaNs(BlockOfCode& code, Xbyak::Xmm result, Xbyak::Xmm op1, X
|
||||
}
|
||||
|
||||
constexpr size_t shift = fsize == 32 ? 0 : 48;
|
||||
if constexpr (fsize == 32) {
|
||||
if (fsize == 32) {
|
||||
code.movd(tmp.cvt32(), xmm0);
|
||||
} else {
|
||||
// We do this to avoid requiring 64-bit immediates
|
||||
@@ -252,7 +234,7 @@ void EmitPostProcessNaNs(BlockOfCode& code, Xbyak::Xmm result, Xbyak::Xmm op1, X
|
||||
// op1 == QNaN && op2 == SNaN <<< The problematic case
|
||||
// op1 == QNaN && op2 == Inf
|
||||
|
||||
if constexpr (fsize == 32) {
|
||||
if (fsize == 32) {
|
||||
code.movd(tmp.cvt32(), op2);
|
||||
code.shl(tmp.cvt32(), 32 - mantissa_msb_bit);
|
||||
} else {
|
||||
@@ -291,7 +273,7 @@ void FPTwoOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Function fn) {
|
||||
if (ctx.HasOptimization(OptimizationFlag::Unsafe_InaccurateNaN)) {
|
||||
// Do nothing
|
||||
} else if (ctx.FPCR().DN()) {
|
||||
ForceToDefaultNaN<fsize>(code, result);
|
||||
ForceToDefaultNaN(code, result, fsize);
|
||||
} else {
|
||||
PostProcessNaN<fsize>(code, result, xmm0);
|
||||
}
|
||||
@@ -302,7 +284,7 @@ void FPTwoOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Function fn) {
|
||||
|
||||
template<size_t fsize, typename Function>
|
||||
void FPThreeOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Function fn) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
|
||||
@@ -317,7 +299,7 @@ void FPThreeOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Function fn)
|
||||
}
|
||||
|
||||
if (!ctx.HasOptimization(OptimizationFlag::Unsafe_InaccurateNaN)) {
|
||||
ForceToDefaultNaN<fsize>(code, result);
|
||||
ForceToDefaultNaN(code, result, fsize);
|
||||
}
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
@@ -361,7 +343,7 @@ void FPThreeOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Function fn)
|
||||
|
||||
template<size_t fsize>
|
||||
void FPAbs(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
constexpr FPT non_sign_mask = FP::FPInfo<FPT>::sign_mask - FPT(1u);
|
||||
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
@@ -387,7 +369,7 @@ void EmitX64::EmitFPAbs64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
void FPNeg(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
constexpr FPT sign_mask = FP::FPInfo<FPT>::sign_mask;
|
||||
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
@@ -442,7 +424,7 @@ static void EmitFPMinMax(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {

FCODE(ucomis)(result, operand);
code.jz(*equal, code.T_NEAR);
if constexpr (is_max) {
if (is_max) {
FCODE(maxs)(result, operand);
} else {
FCODE(mins)(result, operand);
@@ -454,7 +436,7 @@ static void EmitFPMinMax(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {

code.L(*equal);
code.jp(nan);
if constexpr (is_max) {
if (is_max) {
code.andps(result, operand);
} else {
code.orps(result, operand);
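The `ucomis`/`jz`/`jp` sequence is there because x86 `maxs`/`mins` simply return the second operand when the inputs compare equal or when a NaN is involved, which is not what the ARM semantics want. A small host-side illustration of the corner cases being patched up (sketch only):

```cpp
#include <cmath>
#include <cstdio>

int main() {
    // +0.0 and -0.0 compare equal, so a compare-and-select max/min cannot pick
    // the right sign; the emitter breaks the tie bitwise on the jz path:
    // andps for max (sign survives only if both are negative), orps for min.
    std::printf("%d\n", 0.0 == -0.0);                                // 1
    std::printf("%d %d\n", std::signbit(0.0), std::signbit(-0.0));   // 0 1
    // A NaN operand makes ucomis set the parity flag, so the jp branch
    // routes those cases to the dedicated NaN handler instead.
}
```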
@@ -477,7 +459,7 @@ static void EmitFPMinMax(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize, bool is_max>
|
||||
static inline void EmitFPMinMaxNumeric(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) noexcept {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
constexpr FPT default_nan = FP::FPInfo<FPT>::DefaultNaN();
|
||||
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
@@ -502,7 +484,7 @@ static inline void EmitFPMinMaxNumeric(BlockOfCode& code, EmitContext& ctx, IR::
|
||||
tmp.setBit(fsize);
|
||||
|
||||
const auto move_to_tmp = [=, &code](const Xbyak::Xmm& xmm) {
|
||||
if constexpr (fsize == 32) {
|
||||
if (fsize == 32) {
|
||||
code.movd(tmp.cvt32(), xmm);
|
||||
} else {
|
||||
code.movq(tmp.cvt64(), xmm);
|
||||
@@ -513,7 +495,7 @@ static inline void EmitFPMinMaxNumeric(BlockOfCode& code, EmitContext& ctx, IR::
|
||||
|
||||
FCODE(ucomis)(op1, op2);
|
||||
code.jz(*z, code.T_NEAR);
|
||||
if constexpr (is_max) {
|
||||
if (is_max) {
|
||||
FCODE(maxs)(op2, op1);
|
||||
} else {
|
||||
FCODE(mins)(op2, op1);
|
||||
@@ -527,7 +509,7 @@ static inline void EmitFPMinMaxNumeric(BlockOfCode& code, EmitContext& ctx, IR::
|
||||
|
||||
code.L(*z);
|
||||
code.jp(nan);
|
||||
if constexpr (is_max) {
|
||||
if (is_max) {
|
||||
code.andps(op2, op1);
|
||||
} else {
|
||||
code.orps(op2, op1);
|
||||
@@ -629,12 +611,12 @@ void EmitX64::EmitFPMul64(EmitContext& ctx, IR::Inst* inst) {

template<size_t fsize, bool negate_product>
static void EmitFPMulAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
using FPT = mcl::unsigned_integer_of_size<fsize>;
using FPT = Common::UnsignedIntegerN<fsize>;
const auto fallback_fn = negate_product ? &FP::FPMulSub<FPT> : &FP::FPMulAdd<FPT>;

auto args = ctx.reg_alloc.GetArgumentInfo(inst);

if constexpr (fsize != 16) {
if (fsize != 16) {
const bool needs_rounding_correction = ctx.FPCR().FZ();
const bool needs_nan_correction = !ctx.FPCR().DN();

@@ -643,13 +625,13 @@ static void EmitFPMulAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
const Xbyak::Xmm operand3 = ctx.reg_alloc.UseXmm(args[2]);

if constexpr (negate_product) {
if (negate_product) {
FCODE(vfnmadd231s)(result, operand2, operand3);
} else {
FCODE(vfmadd231s)(result, operand2, operand3);
}
if (ctx.FPCR().DN()) {
ForceToDefaultNaN<fsize>(code, result);
ForceToDefaultNaN(code, result, fsize);
}

ctx.reg_alloc.DefineValue(inst, result);
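Whether it goes through `vfmadd231s`/`vfnmadd231s` or the softfloat `FP::FPMulAdd`/`FP::FPMulSub` fallback, the operation being emitted is a fused multiply-add: `addend + op1 * op2` with a single rounding, negating the product in the `negate_product` flavour. A hedged host-side sketch of that contract (names illustrative):

```cpp
#include <cmath>
#include <cstdio>

// a + b*c rounded once (what vfmadd231s computes); the negate_product variant
// corresponds to a - b*c (vfnmadd231s / FPMulSub).
double MulAdd(double a, double b, double c, bool negate_product) {
    return negate_product ? std::fma(-b, c, a) : std::fma(b, c, a);
}

int main() {
    std::printf("%g\n", MulAdd(1.0, 2.0, 3.0, false));  // 1 + 2*3 = 7
    std::printf("%g\n", MulAdd(1.0, 2.0, 3.0, true));   // 1 - 2*3 = -5
}
```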
@@ -665,7 +647,7 @@ static void EmitFPMulAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
|
||||
|
||||
code.movaps(result, operand1);
|
||||
if constexpr (negate_product) {
|
||||
if (negate_product) {
|
||||
FCODE(vfnmadd231s)(result, operand2, operand3);
|
||||
} else {
|
||||
FCODE(vfmadd231s)(result, operand2, operand3);
|
||||
@@ -686,9 +668,8 @@ static void EmitFPMulAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
}
|
||||
if (ctx.FPCR().DN()) {
|
||||
ForceToDefaultNaN<fsize>(code, result);
|
||||
}
|
||||
if (ctx.FPCR().DN())
|
||||
ForceToDefaultNaN(code, result, fsize);
|
||||
code.L(*end);
|
||||
|
||||
ctx.deferred_emits.emplace_back([=, &code, &ctx] {
|
||||
@@ -769,7 +750,7 @@ static void EmitFPMulAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
code.ptest(operand2, xmm0);
|
||||
code.jnz(op2_done);
|
||||
code.vorps(result, operand2, xmm0);
|
||||
if constexpr (negate_product) {
|
||||
if (negate_product) {
|
||||
code.xorps(result, code.Const(xword, FP::FPInfo<FPT>::sign_mask));
|
||||
}
|
||||
code.jmp(*end);
|
||||
@@ -785,7 +766,7 @@ static void EmitFPMulAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
// at this point, all SNaNs have been handled
|
||||
// if op1 was not a QNaN and op2 is, negate the result
|
||||
if constexpr (negate_product) {
|
||||
if (negate_product) {
|
||||
FCODE(ucomis)(operand1, operand1);
|
||||
code.jp(*end);
|
||||
FCODE(ucomis)(operand2, operand2);
|
||||
@@ -806,7 +787,7 @@ static void EmitFPMulAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
const Xbyak::Xmm operand2 = ctx.reg_alloc.UseScratchXmm(args[1]);
|
||||
const Xbyak::Xmm operand3 = ctx.reg_alloc.UseXmm(args[2]);
|
||||
|
||||
if constexpr (negate_product) {
|
||||
if (negate_product) {
|
||||
code.xorps(operand2, code.Const(xword, FP::FPInfo<FPT>::sign_mask));
|
||||
}
|
||||
FCODE(muls)(operand2, operand3);
|
||||
@@ -857,7 +838,7 @@ void EmitX64::EmitFPMulSub64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
static void EmitFPMulX(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
|
||||
@@ -917,9 +898,9 @@ void EmitX64::EmitFPMulX64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
static void EmitFPRecipEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
if constexpr (fsize != 16) {
|
||||
if (fsize != 16) {
|
||||
if (ctx.HasOptimization(OptimizationFlag::Unsafe_ReducedErrorFP)) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
const Xbyak::Xmm operand = ctx.reg_alloc.UseXmm(args[0]);
|
||||
@@ -928,7 +909,7 @@ static void EmitFPRecipEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* i
|
||||
if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
|
||||
FCODE(vrcp14s)(result, operand, operand);
|
||||
} else {
|
||||
if constexpr (fsize == 32) {
|
||||
if (fsize == 32) {
|
||||
code.rcpss(result, operand);
|
||||
} else {
|
||||
code.cvtsd2ss(result, operand);
|
||||
@@ -963,7 +944,7 @@ void EmitX64::EmitFPRecipEstimate64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
static void EmitFPRecipExponent(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
ctx.reg_alloc.HostCall(inst, args[0]);
|
||||
@@ -986,11 +967,11 @@ void EmitX64::EmitFPRecipExponent64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
static void EmitFPRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
|
||||
if constexpr (fsize != 16) {
|
||||
if (fsize != 16) {
|
||||
if (code.HasHostFeature(HostFeature::FMA) && ctx.HasOptimization(OptimizationFlag::Unsafe_InaccurateNaN)) {
|
||||
Xbyak::Label end, fallback;
|
||||
|
||||
@@ -1123,9 +1104,9 @@ void EmitX64::EmitFPRoundInt64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
static void EmitFPRSqrtEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
if constexpr (fsize != 16) {
|
||||
if (fsize != 16) {
|
||||
if (ctx.HasOptimization(OptimizationFlag::Unsafe_ReducedErrorFP)) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
const Xbyak::Xmm operand = ctx.reg_alloc.UseXmm(args[0]);
|
||||
@@ -1134,7 +1115,7 @@ static void EmitFPRSqrtEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* i
|
||||
if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
|
||||
FCODE(vrsqrt14s)(result, operand, operand);
|
||||
} else {
|
||||
if constexpr (fsize == 32) {
|
||||
if (fsize == 32) {
|
||||
code.rsqrtss(result, operand);
|
||||
} else {
|
||||
code.cvtsd2ss(result, operand);
|
||||
@@ -1180,7 +1161,7 @@ static void EmitFPRSqrtEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* i
|
||||
bool needs_fallback = false;
|
||||
|
||||
code.L(*bad_values);
|
||||
if constexpr (fsize == 32) {
|
||||
if (fsize == 32) {
|
||||
code.movd(tmp, operand);
|
||||
|
||||
if (!ctx.FPCR().FZ()) {
|
||||
@@ -1302,11 +1283,11 @@ void EmitX64::EmitFPRSqrtEstimate64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
static void EmitFPRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
|
||||
if constexpr (fsize != 16) {
|
||||
if (fsize != 16) {
|
||||
if (code.HasHostFeature(HostFeature::FMA | HostFeature::AVX) && ctx.HasOptimization(OptimizationFlag::Unsafe_InaccurateNaN)) {
|
||||
const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
|
||||
const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
|
||||
@@ -1485,9 +1466,8 @@ void EmitX64::EmitFPHalfToDouble(EmitContext& ctx, IR::Inst* inst) {
|
||||
// Double-conversion here is acceptable as this is expanding precision.
|
||||
code.vcvtph2ps(result, value);
|
||||
code.vcvtps2pd(result, result);
|
||||
if (ctx.FPCR().DN()) {
|
||||
ForceToDefaultNaN<64>(code, result);
|
||||
}
|
||||
if (ctx.FPCR().DN())
|
||||
ForceToDefaultNaN(code, result, 64);
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
return;
|
||||
@@ -1509,9 +1489,8 @@ void EmitX64::EmitFPHalfToSingle(EmitContext& ctx, IR::Inst* inst) {
|
||||
const Xbyak::Xmm value = ctx.reg_alloc.UseXmm(args[0]);
|
||||
|
||||
code.vcvtph2ps(result, value);
|
||||
if (ctx.FPCR().DN()) {
|
||||
ForceToDefaultNaN<32>(code, result);
|
||||
}
|
||||
if (ctx.FPCR().DN())
|
||||
ForceToDefaultNaN(code, result, 32);
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
return;
|
||||
@@ -1519,23 +1498,22 @@ void EmitX64::EmitFPHalfToSingle(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
ctx.reg_alloc.HostCall(inst, args[0]);
|
||||
code.mov(code.ABI_PARAM2.cvt32(), ctx.FPCR().Value());
|
||||
code.mov(code.ABI_PARAM3.cvt32(), static_cast<u32>(rounding_mode));
|
||||
code.mov(code.ABI_PARAM3.cvt32(), u32(rounding_mode));
|
||||
code.lea(code.ABI_PARAM4, code.ptr[code.ABI_JIT_PTR + code.GetJitStateInfo().offsetof_fpsr_exc]);
|
||||
code.CallFunction(&FP::FPConvert<u32, u16>);
|
||||
}
|
||||
|
||||
void EmitX64::EmitFPSingleToDouble(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
const auto rounding_mode = static_cast<FP::RoundingMode>(args[1].GetImmediateU8());
|
||||
const auto rounding_mode = FP::RoundingMode(args[1].GetImmediateU8());
|
||||
|
||||
// We special-case the non-IEEE-defined ToOdd rounding mode.
|
||||
if (rounding_mode == ctx.FPCR().RMode() && rounding_mode != FP::RoundingMode::ToOdd) {
|
||||
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
|
||||
|
||||
code.cvtss2sd(result, result);
|
||||
if (ctx.FPCR().DN()) {
|
||||
ForceToDefaultNaN<64>(code, result);
|
||||
}
|
||||
if (ctx.FPCR().DN())
|
||||
ForceToDefaultNaN(code, result, 64);
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
} else {
|
||||
ctx.reg_alloc.HostCall(inst, args[0]);
|
||||
@@ -1553,12 +1531,9 @@ void EmitX64::EmitFPSingleToHalf(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
if (code.HasHostFeature(HostFeature::F16C) && !ctx.FPCR().AHP() && !ctx.FPCR().FZ16()) {
|
||||
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
|
||||
|
||||
if (ctx.FPCR().DN()) {
|
||||
ForceToDefaultNaN<32>(code, result);
|
||||
}
|
||||
code.vcvtps2ph(result, result, static_cast<u8>(*round_imm));
|
||||
|
||||
if (ctx.FPCR().DN())
|
||||
ForceToDefaultNaN(code, result, 32);
|
||||
code.vcvtps2ph(result, result, u8(*round_imm));
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
return;
|
||||
}
|
||||
@@ -1586,21 +1561,18 @@ void EmitX64::EmitFPDoubleToHalf(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
void EmitX64::EmitFPDoubleToSingle(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
const auto rounding_mode = static_cast<FP::RoundingMode>(args[1].GetImmediateU8());
|
||||
|
||||
const auto rounding_mode = FP::RoundingMode(args[1].GetImmediateU8());
|
||||
// We special-case the non-IEEE-defined ToOdd rounding mode.
|
||||
if (rounding_mode == ctx.FPCR().RMode() && rounding_mode != FP::RoundingMode::ToOdd) {
|
||||
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
|
||||
|
||||
code.cvtsd2ss(result, result);
|
||||
if (ctx.FPCR().DN()) {
|
||||
ForceToDefaultNaN<32>(code, result);
|
||||
}
|
||||
if (ctx.FPCR().DN())
|
||||
ForceToDefaultNaN(code, result, 32);
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
} else {
|
||||
ctx.reg_alloc.HostCall(inst, args[0]);
|
||||
code.mov(code.ABI_PARAM2.cvt32(), ctx.FPCR().Value());
|
||||
code.mov(code.ABI_PARAM3.cvt32(), static_cast<u32>(rounding_mode));
|
||||
code.mov(code.ABI_PARAM3.cvt32(), u32(rounding_mode));
|
||||
code.lea(code.ABI_PARAM4, code.ptr[code.ABI_JIT_PTR + code.GetJitStateInfo().offsetof_fpsr_exc]);
|
||||
code.CallFunction(&FP::FPConvert<u32, u64>);
|
||||
}
|
||||
@@ -1615,7 +1587,7 @@ void EmitX64::EmitFPDoubleToSingle(EmitContext& ctx, IR::Inst* inst) {
|
||||
/// Better than spamming thousands of templates aye?
|
||||
template<size_t fsize>
|
||||
static u64 EmitFPToFixedThunk(u64 input, FP::FPSR& fpsr, FP::FPCR fpcr, u32 extra_args) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
auto const unsigned_ = ((extra_args >> 24) & 0xff) != 0;
|
||||
auto const isize = ((extra_args >> 16) & 0xff);
|
||||
auto const rounding = FP::RoundingMode((extra_args >> 8) & 0xff);
|
||||
@@ -1630,7 +1602,7 @@ static void EmitFPToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
const size_t fbits = args[1].GetImmediateU8();
|
||||
const auto rounding_mode = FP::RoundingMode(args[2].GetImmediateU8());
|
||||
|
||||
if constexpr (fsize != 16) {
|
||||
if (fsize != 16) {
|
||||
const auto round_imm = ConvertRoundingModeToX64Immediate(rounding_mode);
|
||||
|
||||
// cvttsd2si truncates during operation so rounding (and thus SSE4.1) not required
|
||||
@@ -1640,7 +1612,7 @@ static void EmitFPToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
const Xbyak::Xmm src = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Reg64 result = ctx.reg_alloc.ScratchGpr().cvt64();

if constexpr (fsize == 64) {
if (fsize == 64) {
if (fbits != 0) {
const u64 scale_factor = static_cast<u64>((fbits + 1023) << 52);
code.mulsd(src, code.Const(xword, scale_factor));
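The `scale_factor` constant is the raw IEEE-754 double encoding of 2^fbits: an exponent field of `fbits + 1023` (the double's bias) with a zero mantissa. Multiplying by it shifts the binary point left by `fbits` before the truncating conversion. A quick check of that identity, as a hedged sketch:

```cpp
#include <bit>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    const std::uint64_t fbits = 8;
    // exponent = bias + fbits, mantissa = 0  ==>  value = 2^fbits
    const std::uint64_t scale_bits = (fbits + 1023) << 52;
    const double scale = std::bit_cast<double>(scale_bits);
    std::printf("%g %g\n", scale, std::ldexp(1.0, static_cast<int>(fbits)));  // 256 256
}
```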
@@ -1662,13 +1634,13 @@ static void EmitFPToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
code.cvtss2sd(src, src);
|
||||
}
|
||||
|
||||
if constexpr (isize == 64) {
|
||||
if (isize == 64) {
|
||||
const Xbyak::Xmm scratch = ctx.reg_alloc.ScratchXmm();
|
||||
|
||||
if (!unsigned_) {
|
||||
SharedLabel saturate_max = GenSharedLabel(), end = GenSharedLabel();
|
||||
|
||||
ZeroIfNaN<64>(code, src, scratch);
|
||||
ZeroIfNaN(code, src, scratch, 64);
|
||||
|
||||
code.movsd(scratch, code.Const(xword, f64_max_s64_lim));
|
||||
code.comisd(scratch, src);
|
||||
@@ -1706,11 +1678,11 @@ static void EmitFPToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
code.sar(result2, 63);
|
||||
code.or_(result, result2);
|
||||
}
|
||||
} else if constexpr (isize == 32) {
|
||||
} else if (isize == 32) {
|
||||
if (!unsigned_) {
|
||||
const Xbyak::Xmm scratch = ctx.reg_alloc.ScratchXmm();
|
||||
|
||||
ZeroIfNaN<64>(code, src, scratch);
|
||||
ZeroIfNaN(code, src, scratch, 64);
|
||||
code.minsd(src, code.Const(xword, f64_max_s32));
|
||||
// maxsd not required as cvttsd2si results in 0x8000'0000 when out of range
|
||||
code.cvttsd2si(result.cvt32(), src); // 32 bit gpr
|
||||
@@ -1723,7 +1695,7 @@ static void EmitFPToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
} else {
|
||||
const Xbyak::Xmm scratch = ctx.reg_alloc.ScratchXmm();
|
||||
|
||||
ZeroIfNaN<64>(code, src, scratch);
|
||||
ZeroIfNaN(code, src, scratch, 64);
|
||||
code.maxsd(src, code.Const(xword, unsigned_ ? f64_min_u16 : f64_min_s16));
|
||||
code.minsd(src, code.Const(xword, unsigned_ ? f64_max_u16 : f64_max_s16));
|
||||
code.cvttsd2si(result, src); // 64 bit gpr
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
*/
|
||||
|
||||
#include <mcl/macro/concatenate_tokens.hpp>
|
||||
#include "dynarmic/common/type_util.h"
|
||||
|
||||
#define AxxEmitX64 CONCATENATE_TOKENS(Axx, EmitX64)
|
||||
#define AxxEmitContext CONCATENATE_TOKENS(Axx, EmitContext)
|
||||
@@ -15,14 +16,11 @@ using Vector = std::array<u64, 2>;
|
||||
}
|
||||
|
||||
std::optional<AxxEmitX64::DoNotFastmemMarker> AxxEmitX64::ShouldFastmem(AxxEmitContext& ctx, IR::Inst* inst) const {
|
||||
if (!conf.fastmem_pointer || !exception_handler.SupportsFastmem()) {
|
||||
if (!conf.fastmem_pointer || !exception_handler.SupportsFastmem())
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
const auto marker = std::make_tuple(ctx.Location(), inst->GetName());
|
||||
if (do_not_fastmem.count(marker) > 0) {
|
||||
if (do_not_fastmem.count(marker) > 0)
|
||||
return std::nullopt;
|
||||
}
|
||||
return marker;
|
||||
}
|
||||
|
||||
@@ -58,16 +56,12 @@ void AxxEmitX64::EmitMemoryRead(AxxEmitContext& ctx, IR::Inst* inst) {
|
||||
// Neither fastmem nor page table: Use callbacks
|
||||
if constexpr (bitsize == 128) {
|
||||
ctx.reg_alloc.HostCall(nullptr, {}, args[1]);
|
||||
if (ordered) {
|
||||
code.mfence();
|
||||
}
|
||||
if (ordered) code.mfence();
|
||||
code.CallFunction(memory_read_128);
|
||||
ctx.reg_alloc.DefineValue(inst, xmm1);
|
||||
} else {
|
||||
ctx.reg_alloc.HostCall(inst, {}, args[1]);
|
||||
if (ordered) {
|
||||
code.mfence();
|
||||
}
|
||||
if (ordered) code.mfence();
|
||||
Devirtualize<callback>(conf.callbacks).EmitCall(code);
|
||||
code.ZeroExtendFrom(bitsize, code.ABI_RETURN);
|
||||
}
|
||||
@@ -102,10 +96,10 @@ void AxxEmitX64::EmitMemoryRead(AxxEmitContext& ctx, IR::Inst* inst) {
|
||||
code.call(wrapped_fn);
|
||||
|
||||
fastmem_patch_info.emplace(
|
||||
mcl::bit_cast<u64>(location),
|
||||
std::bit_cast<u64>(location),
|
||||
FastmemPatchInfo{
|
||||
mcl::bit_cast<u64>(code.getCurr()),
|
||||
mcl::bit_cast<u64>(wrapped_fn),
|
||||
std::bit_cast<u64>(code.getCurr()),
|
||||
std::bit_cast<u64>(wrapped_fn),
|
||||
*fastmem_marker,
|
||||
conf.recompile_on_fastmem_failure,
|
||||
});
|
||||
@@ -153,9 +147,7 @@ void AxxEmitX64::EmitMemoryWrite(AxxEmitContext& ctx, IR::Inst* inst) {
|
||||
ctx.reg_alloc.HostCall(nullptr, {}, args[1], args[2]);
|
||||
Devirtualize<callback>(conf.callbacks).EmitCall(code);
|
||||
}
|
||||
if (ordered) {
|
||||
code.mfence();
|
||||
}
|
||||
if (ordered) code.mfence();
|
||||
EmitCheckMemoryAbort(ctx, inst);
|
||||
return;
|
||||
}
|
||||
@@ -189,10 +181,10 @@ void AxxEmitX64::EmitMemoryWrite(AxxEmitContext& ctx, IR::Inst* inst) {
|
||||
code.call(wrapped_fn);
|
||||
|
||||
fastmem_patch_info.emplace(
|
||||
mcl::bit_cast<u64>(location),
|
||||
std::bit_cast<u64>(location),
|
||||
FastmemPatchInfo{
|
||||
mcl::bit_cast<u64>(code.getCurr()),
|
||||
mcl::bit_cast<u64>(wrapped_fn),
|
||||
std::bit_cast<u64>(code.getCurr()),
|
||||
std::bit_cast<u64>(wrapped_fn),
|
||||
*fastmem_marker,
|
||||
conf.recompile_on_fastmem_failure,
|
||||
});
|
||||
@@ -223,7 +215,7 @@ void AxxEmitX64::EmitExclusiveReadMemory(AxxEmitContext& ctx, IR::Inst* inst) {
|
||||
const bool ordered = IsOrdered(args[2].GetImmediateAccType());
|
||||
|
||||
if constexpr (bitsize != 128) {
|
||||
using T = mcl::unsigned_integer_of_size<bitsize>;
|
||||
using T = Common::UnsignedIntegerN<bitsize>;
|
||||
|
||||
ctx.reg_alloc.HostCall(inst, {}, args[1]);
|
||||
|
||||
@@ -290,16 +282,14 @@ void AxxEmitX64::EmitExclusiveWriteMemory(AxxEmitContext& ctx, IR::Inst* inst) {
|
||||
code.mov(code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)], u8(0));
|
||||
code.mov(code.ABI_PARAM1, reinterpret_cast<u64>(&conf));
|
||||
if constexpr (bitsize != 128) {
|
||||
using T = mcl::unsigned_integer_of_size<bitsize>;
|
||||
using T = Common::UnsignedIntegerN<bitsize>;
|
||||
|
||||
code.CallLambda(
|
||||
[](AxxUserConfig& conf, Axx::VAddr vaddr, T value) -> u32 {
|
||||
return conf.global_monitor->DoExclusiveOperation<T>(conf.processor_id, vaddr,
|
||||
[&](T expected) -> bool {
|
||||
return (conf.callbacks->*callback)(vaddr, value, expected);
|
||||
})
|
||||
? 0
|
||||
: 1;
|
||||
[&](T expected) -> bool {
|
||||
return (conf.callbacks->*callback)(vaddr, value, expected);
|
||||
}) ? 0 : 1;
|
||||
});
|
||||
if (ordered) {
|
||||
code.mfence();
|
||||
@@ -311,11 +301,9 @@ void AxxEmitX64::EmitExclusiveWriteMemory(AxxEmitContext& ctx, IR::Inst* inst) {
|
||||
code.CallLambda(
|
||||
[](AxxUserConfig& conf, Axx::VAddr vaddr, Vector& value) -> u32 {
|
||||
return conf.global_monitor->DoExclusiveOperation<Vector>(conf.processor_id, vaddr,
|
||||
[&](Vector expected) -> bool {
|
||||
return (conf.callbacks->*callback)(vaddr, value, expected);
|
||||
})
|
||||
? 0
|
||||
: 1;
|
||||
[&](Vector expected) -> bool {
|
||||
return (conf.callbacks->*callback)(vaddr, value, expected);
|
||||
}) ? 0 : 1;
|
||||
});
|
||||
if (ordered) {
|
||||
code.mfence();
|
||||
@@ -356,7 +344,7 @@ void AxxEmitX64::EmitExclusiveReadMemoryInline(AxxEmitContext& ctx, IR::Inst* in
|
||||
EmitExclusiveLock(code, conf, tmp, tmp2.cvt32());
|
||||
|
||||
code.mov(code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)], u8(1));
|
||||
code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
|
||||
code.mov(tmp, std::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
|
||||
code.mov(qword[tmp], vaddr);
|
||||
|
||||
const auto fastmem_marker = ShouldFastmem(ctx, inst);
|
||||
@@ -369,10 +357,10 @@ void AxxEmitX64::EmitExclusiveReadMemoryInline(AxxEmitContext& ctx, IR::Inst* in
|
||||
const auto location = EmitReadMemoryMov<bitsize>(code, value_idx, src_ptr, ordered);
|
||||
|
||||
fastmem_patch_info.emplace(
|
||||
mcl::bit_cast<u64>(location),
|
||||
std::bit_cast<u64>(location),
|
||||
FastmemPatchInfo{
|
||||
mcl::bit_cast<u64>(code.getCurr()),
|
||||
mcl::bit_cast<u64>(wrapped_fn),
|
||||
std::bit_cast<u64>(code.getCurr()),
|
||||
std::bit_cast<u64>(wrapped_fn),
|
||||
*fastmem_marker,
|
||||
conf.recompile_on_exclusive_fastmem_failure,
|
||||
});
|
||||
@@ -390,7 +378,7 @@ void AxxEmitX64::EmitExclusiveReadMemoryInline(AxxEmitContext& ctx, IR::Inst* in
|
||||
code.call(wrapped_fn);
|
||||
}
|
||||
|
||||
code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorValuePointer(conf.global_monitor, conf.processor_id)));
|
||||
code.mov(tmp, std::bit_cast<u64>(GetExclusiveMonitorValuePointer(conf.global_monitor, conf.processor_id)));
|
||||
EmitWriteMemoryMov<bitsize>(code, tmp, value_idx, false);
|
||||
|
||||
EmitExclusiveUnlock(code, conf, tmp, tmp2.cvt32());
|
||||
@@ -437,7 +425,7 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
|
||||
|
||||
SharedLabel end = GenSharedLabel();
|
||||
|
||||
code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
|
||||
code.mov(tmp, std::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
|
||||
code.mov(status, u32(1));
|
||||
code.cmp(code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)], u8(0));
|
||||
code.je(*end, code.T_NEAR);
|
||||
@@ -447,7 +435,7 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
|
||||
EmitExclusiveTestAndClear(code, conf, vaddr, tmp, rax);
|
||||
|
||||
code.mov(code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)], u8(0));
|
||||
code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorValuePointer(conf.global_monitor, conf.processor_id)));
|
||||
code.mov(tmp, std::bit_cast<u64>(GetExclusiveMonitorValuePointer(conf.global_monitor, conf.processor_id)));
|
||||
|
||||
if constexpr (bitsize == 128) {
|
||||
code.mov(rax, qword[tmp + 0]);
|
||||
@@ -475,25 +463,20 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
const auto location = code.getCurr();

if constexpr (bitsize == 128) {
code.lock();
code.cmpxchg16b(ptr[dest_ptr]);
code.lock(); code.cmpxchg16b(ptr[dest_ptr]);
} else {
switch (bitsize) {
case 8:
code.lock();
code.cmpxchg(code.byte[dest_ptr], value.cvt8());
code.lock(); code.cmpxchg(code.byte[dest_ptr], value.cvt8());
break;
case 16:
code.lock();
code.cmpxchg(word[dest_ptr], value.cvt16());
code.lock(); code.cmpxchg(word[dest_ptr], value.cvt16());
break;
case 32:
code.lock();
code.cmpxchg(dword[dest_ptr], value.cvt32());
code.lock(); code.cmpxchg(dword[dest_ptr], value.cvt32());
break;
case 64:
code.lock();
code.cmpxchg(qword[dest_ptr], value.cvt64());
code.lock(); code.cmpxchg(qword[dest_ptr], value.cvt64());
break;
default:
UNREACHABLE();
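The inline exclusive store maps ARM's store-exclusive onto a host `lock cmpxchg`: the value captured at load-exclusive time sits in rax, and the write only lands if memory still holds it. The same protocol expressed with standard atomics, as a hedged illustration (status 0 means success, matching STXR's convention):

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

// Emulating a store-exclusive with compare-exchange: succeed (return 0) only
// if the location still holds the value seen by the earlier load-exclusive.
std::uint32_t StoreExclusive(std::atomic<std::uint64_t>& mem,
                             std::uint64_t expected, std::uint64_t desired) {
    return mem.compare_exchange_strong(expected, desired) ? 0 : 1;
}

int main() {
    std::atomic<std::uint64_t> mem{42};
    std::printf("%u\n", static_cast<unsigned>(StoreExclusive(mem, 42, 7)));  // 0: store succeeded
    std::printf("%u\n", static_cast<unsigned>(StoreExclusive(mem, 42, 9)));  // 1: memory changed underneath
}
```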
@@ -506,10 +489,10 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
|
||||
code.call(wrapped_fn);
|
||||
|
||||
fastmem_patch_info.emplace(
|
||||
mcl::bit_cast<u64>(location),
|
||||
std::bit_cast<u64>(location),
|
||||
FastmemPatchInfo{
|
||||
mcl::bit_cast<u64>(code.getCurr()),
|
||||
mcl::bit_cast<u64>(wrapped_fn),
|
||||
std::bit_cast<u64>(code.getCurr()),
|
||||
std::bit_cast<u64>(wrapped_fn),
|
||||
*fastmem_marker,
|
||||
conf.recompile_on_exclusive_fastmem_failure,
|
||||
});
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
/* This file is part of the dynarmic project.
|
||||
* Copyright (c) 2022 MerryMage
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
*/
|
||||
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <numeric>
|
||||
#include <bit>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/backend/x64/a32_emit_x64.h"
|
||||
@@ -342,7 +346,7 @@ void EmitExclusiveLock(BlockOfCode& code, const UserConfig& conf, Xbyak::Reg64 p
|
||||
return;
|
||||
}
|
||||
|
||||
code.mov(pointer, mcl::bit_cast<u64>(GetExclusiveMonitorLockPointer(conf.global_monitor)));
|
||||
code.mov(pointer, std::bit_cast<u64>(GetExclusiveMonitorLockPointer(conf.global_monitor)));
|
||||
EmitSpinLockLock(code, pointer, tmp);
|
||||
}
|
||||
|
||||
@@ -352,7 +356,7 @@ void EmitExclusiveUnlock(BlockOfCode& code, const UserConfig& conf, Xbyak::Reg64
|
||||
return;
|
||||
}
|
||||
|
||||
code.mov(pointer, mcl::bit_cast<u64>(GetExclusiveMonitorLockPointer(conf.global_monitor)));
|
||||
code.mov(pointer, std::bit_cast<u64>(GetExclusiveMonitorLockPointer(conf.global_monitor)));
|
||||
EmitSpinLockUnlock(code, pointer, tmp);
|
||||
}
|
||||
|
||||
@@ -369,7 +373,7 @@ void EmitExclusiveTestAndClear(BlockOfCode& code, const UserConfig& conf, Xbyak:
|
||||
continue;
|
||||
}
|
||||
Xbyak::Label ok;
|
||||
code.mov(pointer, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, processor_index)));
|
||||
code.mov(pointer, std::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, processor_index)));
|
||||
code.cmp(qword[pointer], vaddr);
|
||||
code.jne(ok, code.T_NEAR);
|
||||
code.mov(qword[pointer], tmp);
|
||||
|
||||
@@ -11,13 +11,14 @@
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include <mcl/bit/bit_field.hpp>
|
||||
#include "dynarmic/common/common_types.h"
|
||||
#include <mcl/type_traits/integer_of_size.hpp>
|
||||
|
||||
#include "dynarmic/backend/x64/block_of_code.h"
|
||||
#include "dynarmic/backend/x64/emit_x64.h"
|
||||
#include "dynarmic/ir/basic_block.h"
|
||||
#include "dynarmic/ir/microinstruction.h"
|
||||
#include "dynarmic/ir/opcodes.h"
|
||||
#include "dynarmic/common/fp/util.h"
|
||||
#include "dynarmic/common/type_util.h"
|
||||
|
||||
namespace Dynarmic::Backend::X64 {
|
||||
|
||||
@@ -38,7 +39,7 @@ void EmitSignedSaturatedOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst)
|
||||
Xbyak::Reg addend = ctx.reg_alloc.UseGpr(args[1]).changeBit(size);
|
||||
Xbyak::Reg overflow = ctx.reg_alloc.ScratchGpr().changeBit(size);
|
||||
|
||||
constexpr u64 int_max = static_cast<u64>((std::numeric_limits<mcl::signed_integer_of_size<size>>::max)());
|
||||
constexpr u64 int_max = static_cast<u64>((std::numeric_limits<Common::SignedIntegerN<size>>::max)());
|
||||
if constexpr (size < 64) {
|
||||
code.xor_(overflow.cvt32(), overflow.cvt32());
|
||||
code.bt(result.cvt32(), size - 1);
|
||||
@@ -82,7 +83,7 @@ void EmitUnsignedSaturatedOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst
|
||||
Xbyak::Reg op_result = ctx.reg_alloc.UseScratchGpr(args[0]).changeBit(size);
|
||||
Xbyak::Reg addend = ctx.reg_alloc.UseScratchGpr(args[1]).changeBit(size);
|
||||
|
||||
constexpr u64 boundary = op == Op::Add ? (std::numeric_limits<mcl::unsigned_integer_of_size<size>>::max)() : 0;
|
||||
constexpr u64 boundary = op == Op::Add ? (std::numeric_limits<Common::UnsignedIntegerN<size>>::max)() : 0;
|
||||
|
||||
if constexpr (op == Op::Add) {
|
||||
code.add(op_result, addend);
|
||||
|
||||
@@ -20,7 +20,6 @@
|
||||
#include <mcl/mp/typelist/list.hpp>
|
||||
#include <mcl/mp/typelist/lower_to_tuple.hpp>
|
||||
#include <mcl/type_traits/function_info.hpp>
|
||||
#include <mcl/type_traits/integer_of_size.hpp>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/backend/x64/abi.h"
|
||||
@@ -31,27 +30,14 @@
|
||||
#include "dynarmic/common/fp/info.h"
|
||||
#include "dynarmic/common/fp/op.h"
|
||||
#include "dynarmic/common/fp/util.h"
|
||||
#include "dynarmic/common/type_util.h"
|
||||
#include "dynarmic/common/lut_from_list.h"
|
||||
#include "dynarmic/interface/optimization_flags.h"
|
||||
#include "dynarmic/ir/basic_block.h"
|
||||
#include "dynarmic/ir/microinstruction.h"
|
||||
|
||||
#define FCODE(NAME) \
|
||||
[&code](auto... args) { \
|
||||
if constexpr (fsize == 32) { \
|
||||
code.NAME##s(args...); \
|
||||
} else { \
|
||||
code.NAME##d(args...); \
|
||||
} \
|
||||
}
|
||||
#define ICODE(NAME) \
|
||||
[&code](auto... args) { \
|
||||
if constexpr (fsize == 32) { \
|
||||
code.NAME##d(args...); \
|
||||
} else { \
|
||||
code.NAME##q(args...); \
|
||||
} \
|
||||
}
|
||||
#define FCODE(NAME) [&](auto... args) { if (fsize == 32) code.NAME##s(args...); else code.NAME##d(args...); }
|
||||
#define ICODE(NAME) [&](auto... args) { if (fsize == 32) code.NAME##d(args...); else code.NAME##q(args...); }
|
||||
|
||||
namespace Dynarmic::Backend::X64 {
|
||||
|
||||
@@ -76,7 +62,7 @@ void MaybeStandardFPSCRValue(BlockOfCode& code, EmitContext& ctx, bool fpcr_cont
|
||||
template<size_t fsize, template<typename> class Indexer, size_t narg>
|
||||
struct NaNHandler {
|
||||
public:
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
using function_type = void (*)(std::array<VectorArray<FPT>, narg>&, FP::FPCR);
|
||||
|
||||
@@ -158,33 +144,33 @@ Xbyak::Address GetVectorOf(BlockOfCode& code) {
|
||||
|
||||
template<size_t fsize>
|
||||
Xbyak::Address GetNaNVector(BlockOfCode& code) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
return GetVectorOf<fsize, FP::FPInfo<FPT>::DefaultNaN()>(code);
|
||||
}
|
||||
|
||||
template<size_t fsize>
|
||||
Xbyak::Address GetNegativeZeroVector(BlockOfCode& code) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
return GetVectorOf<fsize, FP::FPInfo<FPT>::Zero(true)>(code);
|
||||
}
|
||||
|
||||
template<size_t fsize>
|
||||
Xbyak::Address GetNonSignMaskVector(BlockOfCode& code) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
constexpr FPT non_sign_mask = FP::FPInfo<FPT>::exponent_mask | FP::FPInfo<FPT>::mantissa_mask;
|
||||
return GetVectorOf<fsize, non_sign_mask>(code);
|
||||
}
|
||||
|
||||
template<size_t fsize>
|
||||
Xbyak::Address GetSmallestNormalVector(BlockOfCode& code) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
constexpr FPT smallest_normal_number = FP::FPValue<FPT, false, FP::FPInfo<FPT>::exponent_min, 1>();
|
||||
return GetVectorOf<fsize, smallest_normal_number>(code);
|
||||
}
|
||||
|
||||
template<size_t fsize, bool sign, int exponent, mcl::unsigned_integer_of_size<fsize> value>
|
||||
template<size_t fsize, bool sign, int exponent, Common::UnsignedIntegerN<fsize> value>
|
||||
Xbyak::Address GetVectorOf(BlockOfCode& code) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
return GetVectorOf<fsize, FP::FPValue<FPT, sign, exponent, value>()>(code);
|
||||
}
|
||||
|
||||
@@ -1085,7 +1071,7 @@ static void EmitFPVectorMinMaxNumeric(BlockOfCode& code, EmitContext& ctx, IR::I
|
||||
|
||||
if (code.HasHostFeature(HostFeature::AVX)) {
|
||||
MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&] {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
// result = xmm_a == SNaN || xmm_b == QNaN
|
||||
{
|
||||
@@ -1158,7 +1144,7 @@ static void EmitFPVectorMinMaxNumeric(BlockOfCode& code, EmitContext& ctx, IR::I
|
||||
}
|
||||
|
||||
MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&] {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
// result = xmm_a == SNaN || xmm_b == QNaN
|
||||
{
|
||||
@@ -1314,7 +1300,7 @@ static void EmitFPVectorMulAddFallback(VectorArray<FPT>& result, const VectorArr
|
||||
|
||||
template<size_t fsize>
|
||||
void EmitFPVectorMulAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
const auto fallback_fn = [](VectorArray<FPT>& result, const VectorArray<FPT>& addend, const VectorArray<FPT>& op1, const VectorArray<FPT>& op2, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
||||
for (size_t i = 0; i < result.size(); i++) {
|
||||
@@ -1425,7 +1411,7 @@ void EmitX64::EmitFPVectorMulAdd64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
static void EmitFPVectorMulX(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
const bool fpcr_controlled = args[2].GetImmediateU1();
|
||||
@@ -1491,7 +1477,7 @@ void EmitX64::EmitFPVectorMulX64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
void FPVectorNeg(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
constexpr FPT sign_mask = FP::FPInfo<FPT>::sign_mask;
|
||||
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
@@ -1544,7 +1530,7 @@ void EmitX64::EmitFPVectorPairedAddLower64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
static void EmitRecipEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
if constexpr (fsize != 16) {
|
||||
if (ctx.HasOptimization(OptimizationFlag::Unsafe_ReducedErrorFP)) {
|
||||
@@ -1590,7 +1576,7 @@ void EmitX64::EmitFPVectorRecipEstimate64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
static void EmitRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
const auto fallback_fn = [](VectorArray<FPT>& result, const VectorArray<FPT>& op1, const VectorArray<FPT>& op2, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
||||
for (size_t i = 0; i < result.size(); i++) {
|
||||
@@ -1714,7 +1700,7 @@ void EmitFPVectorRoundInt(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
}
|
||||
|
||||
// Do not make a LUT out of this, let the compiler do it's thing
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
switch (rounding) {
|
||||
case FP::RoundingMode::ToNearest_TieEven:
|
||||
exact
|
||||
@@ -1760,7 +1746,7 @@ void EmitX64::EmitFPVectorRoundInt64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
static void EmitRSqrtEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
const auto fallback_fn = [](VectorArray<FPT>& result, const VectorArray<FPT>& operand, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
||||
for (size_t i = 0; i < result.size(); i++) {
|
||||
@@ -1852,7 +1838,7 @@ void EmitX64::EmitFPVectorRSqrtEstimate64(EmitContext& ctx, IR::Inst* inst) {
|
||||
|
||||
template<size_t fsize>
|
||||
static void EmitRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||
using FPT = Common::UnsignedIntegerN<fsize>;
|
||||
|
||||
const auto fallback_fn = [](VectorArray<FPT>& result, const VectorArray<FPT>& op1, const VectorArray<FPT>& op2, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
||||
for (size_t i = 0; i < result.size(); i++) {
|
||||
@@ -2126,7 +2112,7 @@ void EmitFPVectorToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
FCODE(orp)(src, exceed_unsigned);
|
||||
}
|
||||
} else {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>; // WORKAROUND: For issue 678 on MSVC
|
||||
using FPT = Common::UnsignedIntegerN<fsize>; // WORKAROUND: For issue 678 on MSVC
|
||||
constexpr u64 integer_max = FPT((std::numeric_limits<std::conditional_t<unsigned_, FPT, std::make_signed_t<FPT>>>::max)());
|
||||
|
||||
code.movaps(xmm0, GetVectorOf<fsize, float_upper_limit_signed>(code));
|
||||
@@ -2150,7 +2136,7 @@ void EmitFPVectorToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||
mp::lift_value<FP::RoundingMode::ToNearest_TieAwayFromZero>>;
|
||||
|
||||
static const auto lut = Common::GenerateLookupTableFromList([]<typename I>(I) {
|
||||
using FPT = mcl::unsigned_integer_of_size<fsize>; // WORKAROUND: For issue 678 on MSVC
|
||||
using FPT = Common::UnsignedIntegerN<fsize>; // WORKAROUND: For issue 678 on MSVC
|
||||
return std::pair{
|
||||
mp::lower_to_tuple_v<I>,
|
||||
Common::FptrCast([](VectorArray<FPT>& output, const VectorArray<FPT>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
||||
|
||||
@@ -14,23 +14,8 @@
|
||||
#include "dynarmic/ir/microinstruction.h"
|
||||
#include "dynarmic/ir/opcodes.h"
|
||||
|
||||
#define FCODE(NAME) \
|
||||
[&code](auto... args) { \
|
||||
if constexpr (esize == 32) { \
|
||||
code.NAME##s(args...); \
|
||||
} else { \
|
||||
code.NAME##d(args...); \
|
||||
} \
|
||||
}
|
||||
|
||||
#define ICODE(NAME) \
|
||||
[&code](auto... args) { \
|
||||
if constexpr (esize == 32) { \
|
||||
code.NAME##d(args...); \
|
||||
} else { \
|
||||
code.NAME##q(args...); \
|
||||
} \
|
||||
}
|
||||
#define FCODE(NAME) [&](auto... args) { if (esize == 32) code.NAME##s(args...); else code.NAME##d(args...); }
|
||||
#define ICODE(NAME) [&](auto... args) { if (esize == 32) code.NAME##d(args...); else code.NAME##q(args...); }
|
||||
|
||||
namespace Dynarmic::Backend::X64 {
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
#include <vector>
|
||||
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <numeric>
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
||||
#include "dynarmic/backend/exception_handler.h"
|
||||
@@ -184,20 +184,20 @@ struct ExceptionHandler::Impl final {
|
||||
// Our 3rd argument is a PCONTEXT.
|
||||
|
||||
// If not within our codeblock, ignore this exception.
|
||||
code.mov(code.rax, Safe::Negate(mcl::bit_cast<u64>(code.getCode())));
|
||||
code.mov(code.rax, Safe::Negate(std::bit_cast<u64>(code.getCode())));
|
||||
code.add(code.rax, code.qword[code.ABI_PARAM3 + Xbyak::RegExp(offsetof(CONTEXT, Rip))]);
|
||||
code.cmp(code.rax, static_cast<u32>(code.GetTotalCodeSize()));
|
||||
code.ja(exception_handler_without_cb);
|
||||
|
||||
code.lea(code.rsp, code.ptr[code.rsp - 8]);
|
||||
code.mov(code.ABI_PARAM1, mcl::bit_cast<u64>(&cb));
|
||||
code.mov(code.ABI_PARAM1, std::bit_cast<u64>(&cb));
|
||||
code.mov(code.ABI_PARAM2, code.ABI_PARAM3);
|
||||
code.CallLambda(
|
||||
[](const std::function<FakeCall(u64)>& cb_, PCONTEXT ctx) {
|
||||
FakeCall fc = cb_(ctx->Rip);
|
||||
|
||||
ctx->Rsp -= sizeof(u64);
|
||||
*mcl::bit_cast<u64*>(ctx->Rsp) = fc.ret_rip;
|
||||
*std::bit_cast<u64*>(ctx->Rsp) = fc.ret_rip;
|
||||
ctx->Rip = fc.call_rip;
|
||||
});
|
||||
code.add(code.rsp, 8);
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
/* This file is part of the dynarmic project.
|
||||
* Copyright (c) 2018 MerryMage
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
@@ -6,8 +9,8 @@
|
||||
#pragma once
|
||||
|
||||
#include <string_view>
|
||||
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <bit>
|
||||
#include <numeric>
|
||||
|
||||
namespace Dynarmic::Backend::X64 {
|
||||
|
||||
@@ -17,7 +20,7 @@ void PerfMapRegister(const void* start, const void* end, std::string_view friend
|
||||
|
||||
template<typename T>
|
||||
void PerfMapRegister(T start, const void* end, std::string_view friendly_name) {
|
||||
detail::PerfMapRegister(mcl::bit_cast<const void*>(start), end, friendly_name);
|
||||
detail::PerfMapRegister(std::bit_cast<const void*>(start), end, friendly_name);
|
||||
}
|
||||
|
||||
void PerfMapClear();
|
||||
|
||||
@@ -11,10 +11,10 @@
|
||||
#include <algorithm>
|
||||
#include <numeric>
|
||||
#include <utility>
|
||||
#include <bit>
|
||||
|
||||
#include <fmt/ostream.h>
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/backend/x64/abi.h"
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
/* This file is part of the dynarmic project.
|
||||
* Copyright (c) 2023 MerryMage
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
namespace Dynarmic::Common {
|
||||
|
||||
template<typename T>
|
||||
inline constexpr bool always_false_v = false;
|
||||
|
||||
} // namespace Dynarmic::Common
|
||||
@@ -16,38 +16,33 @@ template<typename... Ts>

// Temporary until MCL is fully removed
#ifndef ASSERT_MSG
#define ASSERT_MSG(_a_, ...) \
([&]() { \
if (!(_a_)) [[unlikely]] { \
assert_terminate(#_a_, __VA_ARGS__); \
} \
}())
# define ASSERT_MSG(_a_, ...) do if (!(_a_)) [[unlikely]] assert_terminate(#_a_, __VA_ARGS__); while(0)
#endif
#ifndef ASSERT_FALSE
#define ASSERT_FALSE(...) \
([&]() { \
assert_terminate("false", __VA_ARGS__); \
}())
# define ASSERT_FALSE(...) assert_terminate("false", __VA_ARGS__)
#endif

#ifndef ASSERT
#define ASSERT(_a_) ASSERT_MSG(_a_, "")
# define ASSERT(_a_) ASSERT_MSG(_a_, "")
#endif
#ifndef UNREACHABLE
#define UNREACHABLE() ASSERT_MSG(false, "unreachable")
# ifdef _MSC_VER
# define UNREACHABLE() ASSERT_FALSE("unreachable")
# else
# define UNREACHABLE() __builtin_unreachable();
# endif
#endif
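`ASSERT_MSG` now expands to a `do ... while(0)` statement instead of an immediately-invoked lambda; the wrapper keeps the macro behaving as a single statement so it composes safely with un-braced `if`/`else`. A hedged sketch of the pitfall the idiom avoids (the `CHECK` macro and `assert_terminate` stub below are illustrative):

```cpp
#include <cstdio>
#include <cstdlib>

[[noreturn]] void assert_terminate(const char* expr, const char* msg) {
    std::fprintf(stderr, "assertion failed: %s (%s)\n", expr, msg);
    std::abort();
}

// do { ... } while (0) swallows the caller's trailing semicolon and keeps the
// whole expansion one statement, so the else below binds to the outer if.
#define CHECK(_a_, _msg_) do { if (!(_a_)) assert_terminate(#_a_, _msg_); } while (0)

void Demo(int x) {
    if (x > 0)
        CHECK(x < 100, "x out of range");
    else
        std::puts("non-positive");
}

int main() { Demo(5); }
```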
#ifdef _DEBUG
|
||||
#ifndef DEBUG_ASSERT
|
||||
#define DEBUG_ASSERT(_a_) ASSERT(_a_)
|
||||
#endif
|
||||
#ifndef DEBUG_ASSERT_MSG
|
||||
#define DEBUG_ASSERT_MSG(_a_, ...) ASSERT_MSG(_a_, __VA_ARGS__)
|
||||
#endif
|
||||
# ifndef DEBUG_ASSERT
|
||||
# define DEBUG_ASSERT(_a_) ASSERT(_a_)
|
||||
# endif
|
||||
# ifndef DEBUG_ASSERT_MSG
|
||||
# define DEBUG_ASSERT_MSG(_a_, ...) ASSERT_MSG(_a_, __VA_ARGS__)
|
||||
# endif
|
||||
#else // not debug
|
||||
#ifndef DEBUG_ASSERT
|
||||
#define DEBUG_ASSERT(_a_)
|
||||
#endif
|
||||
#ifndef DEBUG_ASSERT_MSG
|
||||
#define DEBUG_ASSERT_MSG(_a_, _desc_, ...)
|
||||
#endif
|
||||
# ifndef DEBUG_ASSERT
|
||||
# define DEBUG_ASSERT(_a_)
|
||||
# endif
|
||||
# ifndef DEBUG_ASSERT_MSG
|
||||
# define DEBUG_ASSERT_MSG(_a_, _desc_, ...)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
/* This file is part of the dynarmic project.
|
||||
* Copyright (c) 2018 MerryMage
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <mcl/type_traits/function_info.hpp>
|
||||
|
||||
namespace Dynarmic::Common {
|
||||
|
||||
/// Cast a lambda into an equivalent function pointer.
|
||||
template<class Function>
|
||||
inline auto FptrCast(Function f) noexcept {
|
||||
return static_cast<mcl::equivalent_function_type<Function>*>(f);
|
||||
}
|
||||
|
||||
} // namespace Dynarmic::Common
|
||||
@@ -1,3 +1,6 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
/* This file is part of the dynarmic project.
|
||||
* Copyright (c) 2018 MerryMage
|
||||
* SPDX-License-Identifier: 0BSD
|
||||
@@ -6,6 +9,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <optional>
|
||||
#include <cstdint>
|
||||
|
||||
#include "dynarmic/common/fp/fpcr.h"
|
||||
#include "dynarmic/common/fp/info.h"
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
*/
|
||||
|
||||
#include <string>
|
||||
|
||||
#include <bit>
|
||||
#include <fmt/format.h>
|
||||
|
||||
#ifdef DYNARMIC_USE_LLVM
|
||||
@@ -16,7 +16,6 @@
|
||||
#endif
|
||||
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
||||
#include "dynarmic/common/llvm_disassemble.h"
|
||||
@@ -53,7 +52,7 @@ std::string DisassembleX64(const void* begin, const void* end) {
|
||||
LLVMDisasmDispose(llvm_ctx);
|
||||
#else
|
||||
result += fmt::format("(recompile with DYNARMIC_USE_LLVM=ON to disassemble the generated x86_64 code)\n");
|
||||
result += fmt::format("start: {:016x}, end: {:016x}\n", mcl::bit_cast<u64>(begin), mcl::bit_cast<u64>(end));
|
||||
result += fmt::format("start: {:016x}, end: {:016x}\n", std::bit_cast<u64>(begin), std::bit_cast<u64>(end));
|
||||
#endif
|
||||
|
||||
return result;
|
||||
|
||||
src/dynarmic/src/dynarmic/common/type_util.h (new file, 31 lines)
@@ -0,0 +1,31 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

/* This file is part of the dynarmic project.
 * Copyright (c) 2018 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

#include <mcl/type_traits/function_info.hpp>

namespace Dynarmic::Common {

/// Cast a lambda into an equivalent function pointer.
template<class Function>
inline auto FptrCast(Function f) noexcept {
return static_cast<mcl::equivalent_function_type<Function>*>(f);
}

namespace Detail {
template<std::size_t size> struct IntegerOfSize {};
template<> struct IntegerOfSize<8> { using U = std::uint8_t; using S = std::int8_t; };
template<> struct IntegerOfSize<16> { using U = std::uint16_t; using S = std::int16_t; };
template<> struct IntegerOfSize<32> { using U = std::uint32_t; using S = std::int32_t; };
template<> struct IntegerOfSize<64> { using U = std::uint64_t; using S = std::int64_t; };
}
template<size_t N> using UnsignedIntegerN = typename Detail::IntegerOfSize<N>::U;
template<size_t N> using SignedIntegerN = typename Detail::IntegerOfSize<N>::S;

} // namespace Dynarmic::Common
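The new `UnsignedIntegerN`/`SignedIntegerN` aliases replace `mcl::unsigned_integer_of_size` across the backend. A standalone sketch of how the trait is used at emitter call sites (the trait is re-declared locally so the snippet compiles on its own; in the tree it lives in `Dynarmic::Common`):

```cpp
#include <cstddef>
#include <cstdint>
#include <type_traits>

template <std::size_t size> struct IntegerOfSize {};
template <> struct IntegerOfSize<32> { using U = std::uint32_t; using S = std::int32_t; };
template <> struct IntegerOfSize<64> { using U = std::uint64_t; using S = std::int64_t; };
template <std::size_t N> using UnsignedIntegerN = typename IntegerOfSize<N>::U;

// Typical call-site pattern from the emitters: derive the raw FP container
// type from the element width the template was instantiated with.
template <std::size_t fsize>
void Emit() {
    using FPT = UnsignedIntegerN<fsize>;
    static_assert(std::is_unsigned_v<FPT> && sizeof(FPT) * 8 == fsize);
}

int main() {
    Emit<32>();
    Emit<64>();
}
```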
@@ -11,7 +11,7 @@
|
||||
#include <vector>
|
||||
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <numeric>
|
||||
|
||||
#include "dynarmic/ir/opcodes.h"
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
#include "dynarmic/common/common_types.h"
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include <mcl/bit_cast.hpp>
|
||||
#include <numeric>
|
||||
|
||||
#include "dynarmic/ir/opcodes.h"
|
||||
#include "dynarmic/ir/acc_type.h"
|
||||
@@ -2931,19 +2931,19 @@ public:
|
||||
}
|
||||
|
||||
void CallHostFunction(void (*fn)(void)) {
|
||||
Inst(Opcode::CallHostFunction, Imm64(mcl::bit_cast<u64>(fn)), Value{}, Value{}, Value{});
|
||||
Inst(Opcode::CallHostFunction, Imm64(std::bit_cast<u64>(fn)), Value{}, Value{}, Value{});
|
||||
}
|
||||
|
||||
void CallHostFunction(void (*fn)(u64), const U64& arg1) {
|
||||
Inst(Opcode::CallHostFunction, Imm64(mcl::bit_cast<u64>(fn)), arg1, Value{}, Value{});
|
||||
Inst(Opcode::CallHostFunction, Imm64(std::bit_cast<u64>(fn)), arg1, Value{}, Value{});
|
||||
}
|
||||
|
||||
void CallHostFunction(void (*fn)(u64, u64), const U64& arg1, const U64& arg2) {
|
||||
Inst(Opcode::CallHostFunction, Imm64(mcl::bit_cast<u64>(fn)), arg1, arg2, Value{});
|
||||
Inst(Opcode::CallHostFunction, Imm64(std::bit_cast<u64>(fn)), arg1, arg2, Value{});
|
||||
}
|
||||
|
||||
void CallHostFunction(void (*fn)(u64, u64, u64), const U64& arg1, const U64& arg2, const U64& arg3) {
|
||||
Inst(Opcode::CallHostFunction, Imm64(mcl::bit_cast<u64>(fn)), arg1, arg2, arg3);
|
||||
Inst(Opcode::CallHostFunction, Imm64(std::bit_cast<u64>(fn)), arg1, arg2, arg3);
|
||||
}
|
||||
|
||||
void SetTerm(const Terminal& terminal) {
|
||||
|
||||
@@ -17,7 +17,6 @@
|
||||
#include <vector>
|
||||
|
||||
#include <mcl/bit/swap.hpp>
|
||||
#include <mcl/macro/architecture.hpp>
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
||||
#include "./A32/testenv.h"
|
||||
|
||||
@@ -4,14 +4,13 @@
|
||||
#include "data_manager.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/fs/path_util.h"
|
||||
#include <filesystem>
|
||||
#include <fmt/format.h>
|
||||
|
||||
namespace FrontendCommon::DataManager {
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
const std::string GetDataDir(DataDir dir, const std::string &user_id)
|
||||
const fs::path GetDataDir(DataDir dir, const std::string &user_id)
|
||||
{
|
||||
const fs::path nand_dir = Common::FS::GetEdenPath(Common::FS::EdenPath::NANDDir);
|
||||
|
||||
@@ -35,6 +34,11 @@ const std::string GetDataDir(DataDir dir, const std::string &user_id)
|
||||
return "";
|
||||
}
|
||||
|
||||
const std::string GetDataDirString(DataDir dir, const std::string &user_id)
|
||||
{
|
||||
return GetDataDir(dir, user_id).string();
|
||||
}
|
||||
|
||||
u64 ClearDir(DataDir dir, const std::string &user_id)
|
||||
{
|
||||
fs::path data_dir = GetDataDir(dir, user_id);
|
||||
@@ -65,7 +69,7 @@ u64 DataDirSize(DataDir dir)
|
||||
if (!fs::exists(data_dir))
|
||||
return 0;
|
||||
|
||||
for (const auto& entry : fs::recursive_directory_iterator(data_dir)) {
|
||||
for (const auto &entry : fs::recursive_directory_iterator(data_dir)) {
|
||||
if (!entry.is_directory()) {
|
||||
size += entry.file_size();
|
||||
}
|
||||
@@ -74,4 +78,4 @@ u64 DataDirSize(DataDir dir)
|
||||
return size;
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace FrontendCommon::DataManager
|
||||
|
||||
@@ -6,12 +6,14 @@
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include <string>
|
||||
#include <filesystem>
|
||||
|
||||
namespace FrontendCommon::DataManager {
|
||||
|
||||
enum class DataDir { Saves, UserNand, SysNand, Mods, Shaders };
|
||||
|
||||
const std::string GetDataDir(DataDir dir, const std::string &user_id = "");
|
||||
const std::filesystem::path GetDataDir(DataDir dir, const std::string &user_id = "");
|
||||
const std::string GetDataDirString(DataDir dir, const std::string &user_id = "");
|
||||
|
||||
u64 ClearDir(DataDir dir, const std::string &user_id = "");
|
||||
|
||||
|
||||
@@ -20,13 +20,14 @@ add_library(qt_common STATIC
|
||||
util/applet.h util/applet.cpp
|
||||
util/compress.h util/compress.cpp
|
||||
|
||||
abstract/qt_frontend_util.h abstract/qt_frontend_util.cpp
|
||||
abstract/frontend.h abstract/frontend.cpp
|
||||
abstract/qt_progress_dialog.h abstract/qt_progress_dialog.cpp
|
||||
|
||||
qt_string_lookup.h
|
||||
qt_compat.h
|
||||
|
||||
discord/discord.h
|
||||
util/fs.h util/fs.cpp
|
||||
)
|
||||
|
||||
create_target_directory_groups(qt_common)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
#include "qt_frontend_util.h"
|
||||
#include "frontend.h"
|
||||
#include "qt_common/qt_common.h"
|
||||
|
||||
#ifdef YUZU_QT_WIDGETS
|
||||
@@ -1,8 +1,8 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
#ifndef QT_FRONTEND_UTIL_H
|
||||
#define QT_FRONTEND_UTIL_H
|
||||
#ifndef FRONTEND_H
|
||||
#define FRONTEND_H
|
||||
|
||||
#include <QGuiApplication>
|
||||
#include "qt_common/qt_common.h"
|
||||
@@ -136,4 +136,4 @@ const QString GetSaveFileName(const QString &title,
|
||||
Options options = Options());
|
||||
|
||||
} // namespace QtCommon::Frontend
|
||||
#endif // QT_FRONTEND_UTIL_H
|
||||
#endif // FRONTEND_H
|
||||
@@ -3,11 +3,14 @@
|
||||
|
||||
#include "qt_common.h"
|
||||
#include "common/fs/fs.h"
|
||||
#include "common/fs/ryujinx_compat.h"
|
||||
|
||||
#include <QGuiApplication>
|
||||
#include <QStringLiteral>
|
||||
#include "common/logging/log.h"
|
||||
#include "core/frontend/emu_window.h"
|
||||
#include "qt_common/abstract/frontend.h"
|
||||
#include "qt_common/qt_string_lookup.h"
|
||||
|
||||
#include <QFile>
|
||||
|
||||
@@ -33,7 +36,8 @@ std::unique_ptr<Core::System> system = nullptr;
|
||||
std::shared_ptr<FileSys::RealVfsFilesystem> vfs = nullptr;
|
||||
std::unique_ptr<FileSys::ManualContentProvider> provider = nullptr;
|
||||
|
||||
Core::Frontend::WindowSystemType GetWindowSystemType() {
|
||||
Core::Frontend::WindowSystemType GetWindowSystemType()
|
||||
{
|
||||
// Determine WSI type based on Qt platform.
|
||||
QString platform_name = QGuiApplication::platformName();
|
||||
if (platform_name == QStringLiteral("windows"))
|
||||
@@ -101,9 +105,11 @@ void Init(QObject* root)
|
||||
provider = std::make_unique<FileSys::ManualContentProvider>();
|
||||
}
|
||||
|
||||
std::filesystem::path GetEdenCommand() {
|
||||
std::filesystem::path GetEdenCommand()
|
||||
{
|
||||
std::filesystem::path command;
|
||||
|
||||
// TODO: flatpak?
|
||||
QString appimage = QString::fromLocal8Bit(getenv("APPIMAGE"));
|
||||
if (!appimage.isEmpty()) {
|
||||
command = std::filesystem::path{appimage.toStdString()};
|
||||
|
||||
@@ -42,9 +42,18 @@ enum StringKey {
|
||||
MigrationTooltipClearOld,
|
||||
MigrationTooltipLinkOld,
|
||||
|
||||
// ryujinx
|
||||
KvdbNonexistent,
|
||||
KvdbNoHeader,
|
||||
KvdbInvalidMagic,
|
||||
KvdbMisaligned,
|
||||
KvdbNoImens,
|
||||
RyujinxNoSaveId,
|
||||
|
||||
};
|
||||
|
||||
static const frozen::map<StringKey, frozen::string, 21> strings = {
|
||||
static const constexpr frozen::map<StringKey, frozen::string, 27> strings = {
|
||||
// 0-4
|
||||
{SavesTooltip,
|
||||
QT_TR_NOOP("Contains game save data. DO NOT REMOVE UNLESS YOU KNOW WHAT YOU'RE DOING!")},
|
||||
{ShadersTooltip,
|
||||
@@ -54,6 +63,7 @@ static const frozen::map<StringKey, frozen::string, 21> strings = {
|
||||
{ModsTooltip, QT_TR_NOOP("Contains game mods, patches, and cheats.")},
|
||||
|
||||
// Key install
|
||||
// 5-9
|
||||
{KeyInstallSuccess, QT_TR_NOOP("Decryption Keys were successfully installed")},
|
||||
{KeyInstallInvalidDir, QT_TR_NOOP("Unable to read key directory, aborting")},
|
||||
{KeyInstallErrorFailedCopy, QT_TR_NOOP("One or more keys failed to copy.")},
|
||||
@@ -65,6 +75,7 @@ static const frozen::map<StringKey, frozen::string, 21> strings = {
|
||||
"re-dump keys.")},
|
||||
|
||||
// fw install
|
||||
// 10-14
|
||||
{FwInstallSuccess, QT_TR_NOOP("Successfully installed firmware version %1")},
|
||||
{FwInstallNoNCAs, QT_TR_NOOP("Unable to locate potential firmware NCA files")},
|
||||
{FwInstallFailedDelete, QT_TR_NOOP("Failed to delete one or more firmware files.")},
|
||||
@@ -75,6 +86,7 @@ static const frozen::map<StringKey, frozen::string, 21> strings = {
|
||||
"Eden or re-install firmware.")},
|
||||
|
||||
// migrator
|
||||
// 15-20
|
||||
{MigrationPromptPrefix, QT_TR_NOOP("Eden has detected user data for the following emulators:")},
|
||||
{MigrationPrompt,
|
||||
QT_TR_NOOP("Would you like to migrate your data for use in Eden?\n"
|
||||
@@ -93,6 +105,15 @@ static const frozen::map<StringKey, frozen::string, 21> strings = {
|
||||
{MigrationTooltipLinkOld,
|
||||
QT_TR_NOOP("Creates a filesystem link between the old directory and Eden directory.\n"
|
||||
"This is recommended if you want to share data between emulators.")},
|
||||
|
||||
// why am I writing these comments again
|
||||
// 21-26
|
||||
{KvdbNonexistent, QT_TR_NOOP("Ryujinx title database does not exist.")},
|
||||
{KvdbNoHeader, QT_TR_NOOP("Invalid header on Ryujinx title database.")},
|
||||
{KvdbInvalidMagic, QT_TR_NOOP("Invalid magic header on Ryujinx title database.")},
|
||||
{KvdbMisaligned, QT_TR_NOOP("Invalid byte alignment on Ryujinx title database.")},
|
||||
{KvdbNoImens, QT_TR_NOOP("No items found in Ryujinx title database.")},
|
||||
{RyujinxNoSaveId, QT_TR_NOOP("Title %1 not found in Ryujinx title database.")},
|
||||
};
|
||||
|
||||
static inline const QString Lookup(StringKey key)
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
#include "frontend_common/firmware_manager.h"
|
||||
|
||||
#include "compress.h"
|
||||
#include "qt_common/abstract/qt_frontend_util.h"
|
||||
#include "qt_common/abstract/frontend.h"
|
||||
#include "qt_common/abstract/qt_progress_dialog.h"
|
||||
#include "qt_common/qt_common.h"
|
||||
|
||||
@@ -404,7 +404,7 @@ void ExportDataDir(FrontendCommon::DataManager::DataDir data_dir,
|
||||
std::function<void()> callback)
|
||||
{
|
||||
using namespace QtCommon::Frontend;
|
||||
const std::string dir = FrontendCommon::DataManager::GetDataDir(data_dir, user_id);
|
||||
const std::string dir = FrontendCommon::DataManager::GetDataDirString(data_dir, user_id);
|
||||
|
||||
const QString zip_dump_location = GetSaveFileName(tr("Select Export Location"),
|
||||
tr("%1.zip").arg(name),
|
||||
@@ -468,7 +468,7 @@ void ImportDataDir(FrontendCommon::DataManager::DataDir data_dir,
|
||||
const std::string& user_id,
|
||||
std::function<void()> callback)
|
||||
{
|
||||
const std::string dir = FrontendCommon::DataManager::GetDataDir(data_dir, user_id);
|
||||
const std::string dir = FrontendCommon::DataManager::GetDataDirString(data_dir, user_id);
|
||||
|
||||
using namespace QtCommon::Frontend;
|
||||
|
||||
|
||||
@@ -25,7 +25,8 @@ enum class FirmwareInstallResult {
|
||||
|
||||
inline const QString GetFirmwareInstallResultString(FirmwareInstallResult result)
|
||||
{
|
||||
return QtCommon::StringLookup::Lookup(static_cast<StringLookup::StringKey>((int) result + (int) QtCommon::StringLookup::FwInstallSuccess));
|
||||
return QtCommon::StringLookup::Lookup(static_cast<StringLookup::StringKey>(
|
||||
(int) result + (int) QtCommon::StringLookup::FwInstallSuccess));
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -36,7 +37,8 @@ inline const QString GetFirmwareInstallResultString(FirmwareInstallResult result
|
||||
inline const QString GetKeyInstallResultString(FirmwareManager::KeyInstallResult result)
|
||||
{
|
||||
// this can probably be made into a common function of sorts
|
||||
return QtCommon::StringLookup::Lookup(static_cast<StringLookup::StringKey>((int) result + (int) QtCommon::StringLookup::KeyInstallSuccess));
|
||||
return QtCommon::StringLookup::Lookup(static_cast<StringLookup::StringKey>(
|
||||
(int) result + (int) QtCommon::StringLookup::KeyInstallSuccess));
|
||||
}
|
||||
|
||||
void InstallFirmware(const QString &location, bool recursive);
|
||||
|
||||
130
src/qt_common/util/fs.cpp
Normal file
@@ -0,0 +1,130 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
#include <filesystem>
|
||||
#include "fs.h"
|
||||
#include "common/fs/ryujinx_compat.h"
|
||||
#include "common/fs/symlink.h"
|
||||
#include "frontend_common/data_manager.h"
|
||||
#include "qt_common/abstract/frontend.h"
|
||||
#include "qt_common/qt_string_lookup.h"
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
namespace QtCommon::FS {
|
||||
|
||||
void LinkRyujinx(std::filesystem::path &from, std::filesystem::path &to)
|
||||
{
|
||||
std::error_code ec;
|
||||
|
||||
// "ignore" errors--if the dir fails to be deleted, error handling later will handle it
|
||||
fs::remove_all(to, ec);
|
||||
|
||||
if (Common::FS::CreateSymlink(from, to)) {
|
||||
QtCommon::Frontend::Information(tr("Linked Save Data"), tr("Save data has been linked."));
|
||||
} else {
|
||||
QtCommon::Frontend::Critical(
|
||||
tr("Failed to link save data"),
|
||||
tr("Could not link directory:\n\t%1\nTo:\n\t%2").arg(QString::fromStdString(from.string()), QString::fromStdString(to.string())));
|
||||
}
|
||||
}
|
||||
|
||||
bool CheckUnlink(const fs::path &eden_dir, const fs::path &ryu_dir)
|
||||
{
|
||||
bool eden_link = Common::FS::IsSymlink(eden_dir);
|
||||
bool ryu_link = Common::FS::IsSymlink(ryu_dir);
|
||||
|
||||
if (!(eden_link || ryu_link))
|
||||
return false;
|
||||
|
||||
auto result = QtCommon::Frontend::Warning(
|
||||
tr("Already Linked"),
|
||||
tr("This title is already linked to Ryujinx. Would you like to unlink it?"),
|
||||
QtCommon::Frontend::StandardButton::Yes | QtCommon::Frontend::StandardButton::No);
|
||||
|
||||
if (result != QtCommon::Frontend::StandardButton::Yes)
|
||||
return true;
|
||||
|
||||
fs::path linked;
|
||||
fs::path orig;
|
||||
|
||||
if (eden_link) {
|
||||
linked = eden_dir;
|
||||
orig = ryu_dir;
|
||||
} else {
|
||||
linked = ryu_dir;
|
||||
orig = eden_dir;
|
||||
}
|
||||
|
||||
// first cleanup the symlink/junction,
|
||||
try {
|
||||
// NB: do NOT use remove_all, as Windows treats this as a remove_all to the target,
|
||||
// NOT the junction
|
||||
fs::remove(linked);
|
||||
} catch (std::exception &e) {
|
||||
QtCommon::Frontend::Critical(
|
||||
tr("Failed to unlink old directory"),
|
||||
tr("OS returned error: %1").arg(QString::fromStdString(e.what())));
|
||||
return true;
|
||||
}
|
||||
|
||||
// then COPY the other dir
|
||||
try {
|
||||
fs::copy(orig, linked, fs::copy_options::recursive);
|
||||
} catch (std::exception &e) {
|
||||
QtCommon::Frontend::Critical(
|
||||
tr("Failed to copy save data"),
|
||||
tr("OS returned error: %1").arg(QString::fromStdString(e.what())));
|
||||
}
|
||||
|
||||
QtCommon::Frontend::Information(
|
||||
tr("Unlink Successful"),
|
||||
tr("Successfully unlinked Ryujinx save data. Save data has been kept intact."));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
u64 GetRyujinxSaveID(const u64 &program_id)
|
||||
{
|
||||
auto path = Common::FS::GetKvdbPath();
|
||||
std::vector<Common::FS::IMEN> imens;
|
||||
Common::FS::IMENReadResult res = Common::FS::ReadKvdb(path, imens);
|
||||
|
||||
if (res == Common::FS::IMENReadResult::Success) {
|
||||
// TODO: this can probably be done with std::find_if but I'm lazy
|
||||
for (const Common::FS::IMEN &imen : imens) {
|
||||
if (imen.title_id == program_id)
|
||||
return imen.save_id;
|
||||
}
|
||||
|
||||
QtCommon::Frontend::Critical(
|
||||
tr("Could not find Ryujinx save data"),
|
||||
StringLookup::Lookup(StringLookup::RyujinxNoSaveId).arg(program_id, 0, 16));
|
||||
} else {
|
||||
// TODO: make this long thing a function or something
|
||||
QString caption = StringLookup::Lookup(
|
||||
static_cast<StringLookup::StringKey>((int) res + (int) StringLookup::KvdbNonexistent));
|
||||
QtCommon::Frontend::Critical(tr("Could not find Ryujinx save data"), caption);
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
std::optional<std::pair<fs::path, fs::path> > GetEmuPaths(
|
||||
const u64 program_id, const u64 save_id, const std::string &user_id)
|
||||
{
|
||||
fs::path ryu_dir = Common::FS::GetRyuSavePath(save_id);
|
||||
|
||||
if (user_id.empty())
|
||||
return std::nullopt;
|
||||
|
||||
std::string hex_program = fmt::format("{:016X}", program_id);
|
||||
fs::path eden_dir
|
||||
= FrontendCommon::DataManager::GetDataDir(FrontendCommon::DataManager::DataDir::Saves,
|
||||
user_id)
|
||||
/ hex_program;
|
||||
|
||||
return std::make_pair(eden_dir, ryu_dir);
|
||||
}
|
||||
|
||||
} // namespace QtCommon::FS
|
||||
22
src/qt_common/util/fs.h
Normal file
@@ -0,0 +1,22 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#include "common/common_types.h"
#include <filesystem>
#include <optional>

#pragma once

namespace QtCommon::FS {

void LinkRyujinx(std::filesystem::path &from, std::filesystem::path &to);
u64 GetRyujinxSaveID(const u64 &program_id);

/// @brief {eden, ryu}
std::optional<std::pair<std::filesystem::path, std::filesystem::path>> GetEmuPaths(
    const u64 program_id, const u64 save_id, const std::string &user_id);

/// returns FALSE if the dirs are NOT linked
bool CheckUnlink(const std::filesystem::path &eden_dir, const std::filesystem::path &ryu_dir);

} // namespace QtCommon::FS
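A sketch of how these helpers fit together (not part of the commit; it mirrors `GMainWindow::OnLinkToRyujinx` further down in this diff, and assumes the `RyujinxDialog` and QWidget headers are available):

```cpp
// Sketch only: link one title's save directory between Eden and Ryujinx.
#include "qt_common/util/fs.h"
#include "yuzu/ryujinx_dialog.h" // hypothetical include path for the dialog below

void LinkTitleToRyujinx(u64 program_id, const std::string &user_id, QWidget *parent) {
    const u64 save_id = QtCommon::FS::GetRyujinxSaveID(program_id);
    if (save_id == static_cast<u64>(-1))
        return; // title not found in the Ryujinx title database

    const auto paths = QtCommon::FS::GetEmuPaths(program_id, save_id, user_id);
    if (!paths)
        return; // empty user id, nothing to link

    auto [eden_dir, ryu_dir] = *paths; // {eden, ryu}
    if (!QtCommon::FS::CheckUnlink(eden_dir, ryu_dir)) {
        // Not linked yet: ask which side's save data should win, then create the link.
        RyujinxDialog dialog(eden_dir, ryu_dir, parent);
        dialog.exec();
    }
}
```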
@@ -8,7 +8,7 @@
|
||||
#include "core/file_sys/savedata_factory.h"
|
||||
#include "core/hle/service/am/am_types.h"
|
||||
#include "frontend_common/content_manager.h"
|
||||
#include "qt_common/abstract/qt_frontend_util.h"
|
||||
#include "qt_common/abstract/frontend.h"
|
||||
#include "qt_common/config/uisettings.h"
|
||||
#include "qt_common/qt_common.h"
|
||||
#include "yuzu/util/util.h"
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
#include <QUrl>
|
||||
#include "common/fs/fs.h"
|
||||
#include "common/fs/path_util.h"
|
||||
#include "qt_common/abstract/qt_frontend_util.h"
|
||||
#include "qt_common/abstract/frontend.h"
|
||||
#include <fmt/format.h>
|
||||
|
||||
namespace QtCommon::Path {
|
||||
|
||||
@@ -7,9 +7,7 @@
|
||||
#include <algorithm>
|
||||
#include <bit>
|
||||
#include <optional>
|
||||
#include <unordered_map>
|
||||
#include <tuple>
|
||||
#include <limits>
|
||||
|
||||
#include <boost/container/small_vector.hpp>
|
||||
|
||||
#include "shader_recompiler/environment.h"
|
||||
@@ -179,93 +177,6 @@ bool IsBindless(const IR::Inst& inst) {
|
||||
bool IsTextureInstruction(const IR::Inst& inst) {
|
||||
return IndexedInstruction(inst) != IR::Opcode::Void;
|
||||
}
|
||||
// Per-pass caches
|
||||
struct CbufWordKey {
|
||||
u32 index;
|
||||
u32 offset;
|
||||
bool operator==(const CbufWordKey& o) const noexcept {
|
||||
return index == o.index && offset == o.offset;
|
||||
}
|
||||
};
|
||||
struct CbufWordKeyHash {
|
||||
size_t operator()(const CbufWordKey& k) const noexcept {
|
||||
return (static_cast<size_t>(k.index) << 32) ^ k.offset;
|
||||
}
|
||||
};
|
||||
|
||||
struct HandleKey {
|
||||
u32 index, offset, shift_left;
|
||||
u32 sec_index, sec_offset, sec_shift_left;
|
||||
bool has_secondary;
|
||||
bool operator==(const HandleKey& o) const noexcept {
|
||||
return std::tie(index, offset, shift_left,
|
||||
sec_index, sec_offset, sec_shift_left, has_secondary)
|
||||
== std::tie(o.index, o.offset, o.shift_left,
|
||||
o.sec_index, o.sec_offset, o.sec_shift_left, o.has_secondary);
|
||||
}
|
||||
};
|
||||
struct HandleKeyHash {
|
||||
size_t operator()(const HandleKey& k) const noexcept {
|
||||
size_t h = (static_cast<size_t>(k.index) << 32) ^ k.offset;
|
||||
h ^= (static_cast<size_t>(k.shift_left) << 1);
|
||||
h ^= (static_cast<size_t>(k.sec_index) << 33) ^ (static_cast<size_t>(k.sec_offset) << 2);
|
||||
h ^= (static_cast<size_t>(k.sec_shift_left) << 3);
|
||||
h ^= k.has_secondary ? 0x9e3779b97f4a7c15ULL : 0ULL;
|
||||
return h;
|
||||
}
|
||||
};
|
||||
|
||||
// Thread-local(may implement multithreading in future *wink*)
|
||||
thread_local std::unordered_map<CbufWordKey, u32, CbufWordKeyHash> g_cbuf_word_cache;
|
||||
thread_local std::unordered_map<HandleKey, u32, HandleKeyHash> g_handle_cache;
|
||||
thread_local std::unordered_map<const IR::Inst*, ConstBufferAddr> g_track_cache;
|
||||
|
||||
static inline u32 ReadCbufCached(Environment& env, u32 index, u32 offset) {
|
||||
const CbufWordKey k{index, offset};
|
||||
if (auto it = g_cbuf_word_cache.find(k); it != g_cbuf_word_cache.end()) return it->second;
|
||||
const u32 v = env.ReadCbufValue(index, offset);
|
||||
g_cbuf_word_cache.emplace(k, v);
|
||||
return v;
|
||||
}
|
||||
|
||||
static inline u32 GetTextureHandleCached(Environment& env, const ConstBufferAddr& cbuf) {
|
||||
const u32 sec_idx = cbuf.has_secondary ? cbuf.secondary_index : cbuf.index;
|
||||
const u32 sec_off = cbuf.has_secondary ? cbuf.secondary_offset : cbuf.offset;
|
||||
const HandleKey hk{cbuf.index, cbuf.offset, cbuf.shift_left,
|
||||
sec_idx, sec_off, cbuf.secondary_shift_left, cbuf.has_secondary};
|
||||
if (auto it = g_handle_cache.find(hk); it != g_handle_cache.end()) return it->second;
|
||||
|
||||
const u32 lhs = ReadCbufCached(env, cbuf.index, cbuf.offset) << cbuf.shift_left;
|
||||
const u32 rhs = ReadCbufCached(env, sec_idx, sec_off) << cbuf.secondary_shift_left;
|
||||
const u32 handle = lhs | rhs;
|
||||
g_handle_cache.emplace(hk, handle);
|
||||
return handle;
|
||||
}
|
||||
|
||||
// Cached variants of existing helpers
|
||||
static inline TextureType ReadTextureTypeCached(Environment& env, const ConstBufferAddr& cbuf) {
|
||||
return env.ReadTextureType(GetTextureHandleCached(env, cbuf));
|
||||
}
|
||||
static inline TexturePixelFormat ReadTexturePixelFormatCached(Environment& env,
|
||||
const ConstBufferAddr& cbuf) {
|
||||
return env.ReadTexturePixelFormat(GetTextureHandleCached(env, cbuf));
|
||||
}
|
||||
static inline bool IsTexturePixelFormatIntegerCached(Environment& env,
|
||||
const ConstBufferAddr& cbuf) {
|
||||
return env.IsTexturePixelFormatInteger(GetTextureHandleCached(env, cbuf));
|
||||
}
|
||||
|
||||
|
||||
std::optional<ConstBufferAddr> Track(const IR::Value& value, Environment& env);
|
||||
static inline std::optional<ConstBufferAddr> TrackCached(const IR::Value& v, Environment& env) {
|
||||
if (const IR::Inst* key = v.InstRecursive()) {
|
||||
if (auto it = g_track_cache.find(key); it != g_track_cache.end()) return it->second;
|
||||
auto found = Track(v, env);
|
||||
if (found) g_track_cache.emplace(key, *found);
|
||||
return found;
|
||||
}
|
||||
return Track(v, env);
|
||||
}
|
||||
|
||||
std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env);
|
||||
|
||||
@@ -292,7 +203,7 @@ std::optional<u32> TryGetConstant(IR::Value& value, Environment& env) {
|
||||
return std::nullopt;
|
||||
}
|
||||
const auto offset_number = offset.U32();
|
||||
return ReadCbufCached(env, index_number, offset_number);
|
||||
return env.ReadCbufValue(index_number, offset_number);
|
||||
}
|
||||
|
||||
std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env) {
|
||||
@@ -300,8 +211,8 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environme
|
||||
default:
|
||||
return std::nullopt;
|
||||
case IR::Opcode::BitwiseOr32: {
|
||||
std::optional lhs{TrackCached(inst->Arg(0), env)};
|
||||
std::optional rhs{TrackCached(inst->Arg(1), env)};
|
||||
std::optional lhs{Track(inst->Arg(0), env)};
|
||||
std::optional rhs{Track(inst->Arg(1), env)};
|
||||
if (!lhs || !rhs) {
|
||||
return std::nullopt;
|
||||
}
|
||||
@@ -331,7 +242,7 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environme
|
||||
if (!shift.IsImmediate()) {
|
||||
return std::nullopt;
|
||||
}
|
||||
std::optional lhs{TrackCached(inst->Arg(0), env)};
|
||||
std::optional lhs{Track(inst->Arg(0), env)};
|
||||
if (lhs) {
|
||||
lhs->shift_left = shift.U32();
|
||||
}
|
||||
@@ -360,7 +271,7 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environme
|
||||
return std::nullopt;
|
||||
} while (false);
|
||||
}
|
||||
std::optional lhs{TrackCached(op1, env)};
|
||||
std::optional lhs{Track(op1, env)};
|
||||
if (lhs) {
|
||||
lhs->shift_left = static_cast<u32>(std::countr_zero(op2.U32()));
|
||||
}
|
||||
@@ -435,7 +346,7 @@ static ConstBufferAddr last_valid_addr = ConstBufferAddr{
|
||||
TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
|
||||
ConstBufferAddr addr;
|
||||
if (IsBindless(inst)) {
|
||||
const std::optional<ConstBufferAddr> track_addr{TrackCached(inst.Arg(0), env)};
|
||||
const std::optional<ConstBufferAddr> track_addr{Track(inst.Arg(0), env)};
|
||||
|
||||
if (!track_addr) {
|
||||
//throw NotImplementedException("Failed to track bindless texture constant buffer");
|
||||
@@ -473,15 +384,15 @@ u32 GetTextureHandle(Environment& env, const ConstBufferAddr& cbuf) {
|
||||
return lhs_raw | rhs_raw;
|
||||
}
|
||||
|
||||
[[maybe_unused]]TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) {
|
||||
TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) {
|
||||
return env.ReadTextureType(GetTextureHandle(env, cbuf));
|
||||
}
|
||||
|
||||
[[maybe_unused]]TexturePixelFormat ReadTexturePixelFormat(Environment& env, const ConstBufferAddr& cbuf) {
|
||||
TexturePixelFormat ReadTexturePixelFormat(Environment& env, const ConstBufferAddr& cbuf) {
|
||||
return env.ReadTexturePixelFormat(GetTextureHandle(env, cbuf));
|
||||
}
|
||||
|
||||
[[maybe_unused]]bool IsTexturePixelFormatInteger(Environment& env, const ConstBufferAddr& cbuf) {
|
||||
bool IsTexturePixelFormatInteger(Environment& env, const ConstBufferAddr& cbuf) {
|
||||
return env.IsTexturePixelFormatInteger(GetTextureHandle(env, cbuf));
|
||||
}
|
||||
|
||||
@@ -632,10 +543,6 @@ void PatchTexelFetch(IR::Block& block, IR::Inst& inst, TexturePixelFormat pixel_
|
||||
} // Anonymous namespace
|
||||
|
||||
void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo& host_info) {
|
||||
// reset per-pass caches
|
||||
g_cbuf_word_cache.clear();
|
||||
g_handle_cache.clear();
|
||||
g_track_cache.clear();
|
||||
TextureInstVector to_replace;
|
||||
for (IR::Block* const block : program.post_order_blocks) {
|
||||
for (IR::Inst& inst : block->Instructions()) {
|
||||
@@ -646,9 +553,11 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
|
||||
}
|
||||
}
|
||||
// Sort instructions to visit textures by constant buffer index, then by offset
|
||||
std::ranges::sort(to_replace, [](const auto& a, const auto& b) {
|
||||
if (a.cbuf.index != b.cbuf.index) return a.cbuf.index < b.cbuf.index;
|
||||
return a.cbuf.offset < b.cbuf.offset;
|
||||
std::ranges::sort(to_replace, [](const auto& lhs, const auto& rhs) {
|
||||
return lhs.cbuf.offset < rhs.cbuf.offset;
|
||||
});
|
||||
std::stable_sort(to_replace.begin(), to_replace.end(), [](const auto& lhs, const auto& rhs) {
|
||||
return lhs.cbuf.index < rhs.cbuf.index;
|
||||
});
|
||||
Descriptors descriptors{
|
||||
program.info.texture_buffer_descriptors,
|
||||
@@ -666,14 +575,14 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
|
||||
bool is_multisample{false};
|
||||
switch (inst->GetOpcode()) {
|
||||
case IR::Opcode::ImageQueryDimensions:
|
||||
flags.type.Assign(ReadTextureTypeCached(env, cbuf));
|
||||
flags.type.Assign(ReadTextureType(env, cbuf));
|
||||
inst->SetFlags(flags);
|
||||
break;
|
||||
case IR::Opcode::ImageSampleImplicitLod:
|
||||
if (flags.type != TextureType::Color2D) {
|
||||
break;
|
||||
}
|
||||
if (ReadTextureTypeCached(env, cbuf) == TextureType::Color2DRect) {
|
||||
if (ReadTextureType(env, cbuf) == TextureType::Color2DRect) {
|
||||
PatchImageSampleImplicitLod(*texture_inst.block, *texture_inst.inst);
|
||||
}
|
||||
break;
|
||||
@@ -687,7 +596,7 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
|
||||
if (flags.type != TextureType::Color1D) {
|
||||
break;
|
||||
}
|
||||
if (ReadTextureTypeCached(env, cbuf) == TextureType::Buffer) {
|
||||
if (ReadTextureType(env, cbuf) == TextureType::Buffer) {
|
||||
// Replace with the bound texture type only when it's a texture buffer
|
||||
// If the instruction is 1D and the bound type is 2D, don't change the code and let
|
||||
// the rasterizer robustness handle it
|
||||
@@ -718,7 +627,7 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
|
||||
}
|
||||
const bool is_written{inst->GetOpcode() != IR::Opcode::ImageRead};
|
||||
const bool is_read{inst->GetOpcode() != IR::Opcode::ImageWrite};
|
||||
const bool is_integer{IsTexturePixelFormatIntegerCached(env, cbuf)};
|
||||
const bool is_integer{IsTexturePixelFormatInteger(env, cbuf)};
|
||||
if (flags.type == TextureType::Buffer) {
|
||||
index = descriptors.Add(ImageBufferDescriptor{
|
||||
.format = flags.image_format,
|
||||
@@ -782,16 +691,16 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
|
||||
if (cbuf.count > 1) {
|
||||
const auto insert_point{IR::Block::InstructionList::s_iterator_to(*inst)};
|
||||
IR::IREmitter ir{*texture_inst.block, insert_point};
|
||||
const IR::U32 shift{ir.Imm32(DESCRIPTOR_SIZE_SHIFT)};
|
||||
inst->SetArg(0, ir.UMin(ir.ShiftRightLogical(cbuf.dynamic_offset, shift),
|
||||
ir.Imm32(DESCRIPTOR_SIZE - 1)));
|
||||
const IR::U32 shift{ir.Imm32(std::countr_zero(DESCRIPTOR_SIZE))};
|
||||
inst->SetArg(0, ir.UMin(ir.ShiftRightArithmetic(cbuf.dynamic_offset, shift),
|
||||
ir.Imm32(DESCRIPTOR_SIZE - 1)));
|
||||
} else {
|
||||
inst->SetArg(0, IR::Value{});
|
||||
}
|
||||
|
||||
if (!host_info.support_snorm_render_buffer && inst->GetOpcode() == IR::Opcode::ImageFetch &&
|
||||
flags.type == TextureType::Buffer) {
|
||||
const auto pixel_format = ReadTexturePixelFormatCached(env, cbuf);
|
||||
const auto pixel_format = ReadTexturePixelFormat(env, cbuf);
|
||||
if (IsPixelFormatSNorm(pixel_format)) {
|
||||
PatchTexelFetch(*texture_inst.block, *texture_inst.inst, pixel_format);
|
||||
}
|
||||
|
||||
@@ -52,10 +52,20 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
|
||||
ASSERT(!config.IsPitchLinear());
|
||||
}
|
||||
TextureType tex_type = config.texture_type;
|
||||
if (tex_type == TextureType::Texture1D && (config.Depth() > 1 || config.BaseLayer() != 0)) {
|
||||
tex_type = TextureType::Texture1DArray;
|
||||
} else if (tex_type == TextureType::Texture2D && (config.Depth() > 1 || config.BaseLayer() != 0)) {
|
||||
tex_type = TextureType::Texture2DArray;
|
||||
if ((config.Depth() > 1 || config.BaseLayer() != 0) && config.BaseLayer() < config.Depth()) {
|
||||
switch (tex_type) {
|
||||
case TextureType::Texture1D:
|
||||
tex_type = TextureType::Texture1DArray;
|
||||
break;
|
||||
case TextureType::Texture2D:
|
||||
tex_type = TextureType::Texture2DArray;
|
||||
break;
|
||||
case TextureType::TextureCubemap:
|
||||
tex_type = TextureType::TextureCubeArray;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
switch (tex_type) {
|
||||
case TextureType::Texture1D:
|
||||
@@ -76,7 +86,6 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
|
||||
break;
|
||||
case TextureType::Texture2D:
|
||||
case TextureType::Texture2DNoMipmap:
|
||||
ASSERT(config.BaseLayer() == 0);
|
||||
ASSERT(config.Depth() == 1);
|
||||
type = config.IsPitchLinear() ? ImageType::Linear : ImageType::e2D;
|
||||
rescaleable = !config.IsPitchLinear();
|
||||
@@ -106,6 +115,7 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
|
||||
case TextureType::TextureCubeArray:
|
||||
UNIMPLEMENTED_IF(config.load_store_hint != 0);
|
||||
ASSERT(config.Depth() > 0);
|
||||
ASSERT(config.BaseLayer() < config.Depth());
|
||||
type = ImageType::e2D;
|
||||
size.width = config.Width();
|
||||
size.height = config.Height();
|
||||
|
||||
@@ -41,10 +41,20 @@ ImageViewInfo::ImageViewInfo(const TICEntry& config, s32 base_layer) noexcept
|
||||
};
|
||||
range.extent.levels = config.res_max_mip_level - config.res_min_mip_level + 1;
|
||||
TextureType tex_type = config.texture_type;
|
||||
if (tex_type == TextureType::Texture1D && (config.Depth() > 1 || base_layer != 0)) {
|
||||
tex_type = TextureType::Texture1DArray;
|
||||
} else if (tex_type == TextureType::Texture2D && (config.Depth() > 1 || base_layer != 0)) {
|
||||
tex_type = TextureType::Texture2DArray;
|
||||
if ((config.Depth() > 1 || base_layer != 0) && static_cast<u32>(base_layer) < config.Depth()) {
|
||||
switch (tex_type) {
|
||||
case TextureType::Texture1D:
|
||||
tex_type = TextureType::Texture1DArray;
|
||||
break;
|
||||
case TextureType::Texture2D:
|
||||
tex_type = TextureType::Texture2DArray;
|
||||
break;
|
||||
case TextureType::TextureCubemap:
|
||||
tex_type = TextureType::TextureCubeArray;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
switch (tex_type) {
|
||||
case TextureType::Texture1D:
|
||||
@@ -63,7 +73,6 @@ ImageViewInfo::ImageViewInfo(const TICEntry& config, s32 base_layer) noexcept
|
||||
case TextureType::Texture2D:
|
||||
case TextureType::Texture2DNoMipmap:
|
||||
ASSERT(config.Depth() == 1);
|
||||
ASSERT(base_layer == 0);
|
||||
type = config.normalized_coords ? ImageViewType::e2D : ImageViewType::Rect;
|
||||
range.extent.layers = 1;
|
||||
break;
|
||||
|
||||
@@ -236,6 +236,7 @@ add_executable(yuzu
|
||||
|
||||
data_dialog.h data_dialog.cpp data_dialog.ui
|
||||
data_widget.ui
|
||||
ryujinx_dialog.h ryujinx_dialog.cpp ryujinx_dialog.ui
|
||||
)
|
||||
|
||||
set_target_properties(yuzu PROPERTIES OUTPUT_NAME "eden")
|
||||
|
||||
@@ -95,7 +95,7 @@ void DataWidget::open()
|
||||
user_id = selectProfile();
|
||||
}
|
||||
QDesktopServices::openUrl(QUrl::fromLocalFile(
|
||||
QString::fromStdString(FrontendCommon::DataManager::GetDataDir(m_dir, user_id))));
|
||||
QString::fromStdString(FrontendCommon::DataManager::GetDataDirString(m_dir, user_id))));
|
||||
}
|
||||
|
||||
void DataWidget::upload()
|
||||
|
||||
@@ -542,6 +542,7 @@ void GameList::PopupContextMenu(const QPoint& menu_location) {
|
||||
}
|
||||
|
||||
void GameList::AddGamePopup(QMenu& context_menu, u64 program_id, const std::string& path) {
|
||||
// TODO(crueter): Refactor this and make it less bad
|
||||
QAction* favorite = context_menu.addAction(tr("Favorite"));
|
||||
context_menu.addSeparator();
|
||||
QAction* start_game = context_menu.addAction(tr("Start Game"));
|
||||
@@ -581,6 +582,7 @@ void GameList::AddGamePopup(QMenu& context_menu, u64 program_id, const std::stri
|
||||
#endif
|
||||
context_menu.addSeparator();
|
||||
QAction* properties = context_menu.addAction(tr("Configure Game"));
|
||||
QAction* ryujinx = context_menu.addAction(tr("Link to Ryujinx"));
|
||||
|
||||
favorite->setVisible(program_id != 0);
|
||||
favorite->setCheckable(true);
|
||||
@@ -662,6 +664,9 @@ void GameList::AddGamePopup(QMenu& context_menu, u64 program_id, const std::stri
|
||||
#endif
|
||||
connect(properties, &QAction::triggered,
|
||||
[this, path]() { emit OpenPerGameGeneralRequested(path); });
|
||||
|
||||
connect(ryujinx, &QAction::triggered, [this, program_id]() { emit LinkToRyujinxRequested(program_id);
|
||||
});
|
||||
};
|
||||
|
||||
void GameList::AddCustomDirPopup(QMenu& context_menu, QModelIndex selected) {
|
||||
|
||||
@@ -113,6 +113,7 @@ signals:
|
||||
void NavigateToGamedbEntryRequested(u64 program_id,
|
||||
const CompatibilityList& compatibility_list);
|
||||
void OpenPerGameGeneralRequested(const std::string& file);
|
||||
void LinkToRyujinxRequested(const u64 &program_id);
|
||||
void OpenDirectory(const QString& directory);
|
||||
void AddDirectory();
|
||||
void ShowList(bool show);
|
||||
|
||||
@@ -6,10 +6,12 @@
|
||||
#include "core/tools/renderdoc.h"
|
||||
#include "frontend_common/firmware_manager.h"
|
||||
#include "qt_common/qt_common.h"
|
||||
#include "qt_common/abstract/frontend.h"
|
||||
#include "qt_common/util/content.h"
|
||||
#include "qt_common/util/game.h"
|
||||
#include "qt_common/util/meta.h"
|
||||
#include "qt_common/util/path.h"
|
||||
#include "qt_common/util/fs.h"
|
||||
#include <clocale>
|
||||
#include <cmath>
|
||||
#include <memory>
|
||||
@@ -108,6 +110,7 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
|
||||
#include "common/detached_tasks.h"
|
||||
#include "common/fs/fs.h"
|
||||
#include "common/fs/path_util.h"
|
||||
#include "common/fs/ryujinx_compat.h"
|
||||
#include "common/literals.h"
|
||||
#include "common/logging/backend.h"
|
||||
#include "common/logging/log.h"
|
||||
@@ -160,6 +163,7 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
|
||||
#include "yuzu/debugger/wait_tree.h"
|
||||
#include "yuzu/data_dialog.h"
|
||||
#include "yuzu/deps_dialog.h"
|
||||
#include "yuzu/ryujinx_dialog.h"
|
||||
#include "qt_common/discord/discord.h"
|
||||
#include "yuzu/game_list.h"
|
||||
#include "yuzu/game_list_p.h"
|
||||
@@ -1597,6 +1601,8 @@ void GMainWindow::ConnectWidgetEvents() {
|
||||
|
||||
connect(game_list, &GameList::OpenPerGameGeneralRequested, this,
|
||||
&GMainWindow::OnGameListOpenPerGameProperties);
|
||||
connect(game_list, &GameList::LinkToRyujinxRequested, this,
|
||||
&GMainWindow::OnLinkToRyujinx);
|
||||
|
||||
connect(this, &GMainWindow::UpdateInstallProgress, this,
|
||||
&GMainWindow::IncrementInstallProgress);
|
||||
@@ -2875,6 +2881,61 @@ void GMainWindow::OnGameListOpenPerGameProperties(const std::string& file) {
|
||||
OpenPerGameConfiguration(title_id, file);
|
||||
}
|
||||
|
||||
std::string GMainWindow::GetProfileID()
|
||||
{
|
||||
const auto select_profile = [this] {
|
||||
const Core::Frontend::ProfileSelectParameters parameters{
|
||||
.mode = Service::AM::Frontend::UiMode::UserSelector,
|
||||
.invalid_uid_list = {},
|
||||
.display_options = {},
|
||||
.purpose = Service::AM::Frontend::UserSelectionPurpose::General,
|
||||
};
|
||||
QtProfileSelectionDialog dialog(*QtCommon::system, this, parameters);
|
||||
dialog.setWindowFlags(Qt::Dialog | Qt::CustomizeWindowHint | Qt::WindowTitleHint
|
||||
| Qt::WindowSystemMenuHint | Qt::WindowCloseButtonHint);
|
||||
dialog.setWindowModality(Qt::WindowModal);
|
||||
|
||||
if (dialog.exec() == QDialog::Rejected) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return dialog.GetIndex();
|
||||
};
|
||||
|
||||
const auto index = select_profile();
|
||||
if (index == -1) {
|
||||
return "";
|
||||
}
|
||||
|
||||
const auto uuid = QtCommon::system->GetProfileManager().GetUser(static_cast<std::size_t>(index));
|
||||
ASSERT(uuid);
|
||||
|
||||
const auto user_id = uuid->AsU128();
|
||||
|
||||
return fmt::format("{:016X}{:016X}", user_id[1], user_id[0]);
|
||||
}
|
||||
|
||||
void GMainWindow::OnLinkToRyujinx(const u64& program_id)
|
||||
{
|
||||
u64 save_id = QtCommon::FS::GetRyujinxSaveID(program_id);
|
||||
if (save_id == (u64) -1)
|
||||
return;
|
||||
|
||||
const std::string user_id = GetProfileID();
|
||||
|
||||
auto paths = QtCommon::FS::GetEmuPaths(program_id, save_id, user_id);
|
||||
if (!paths)
|
||||
return;
|
||||
|
||||
auto eden_dir = paths.value().first;
|
||||
auto ryu_dir = paths.value().second;
|
||||
|
||||
if (!QtCommon::FS::CheckUnlink(eden_dir, ryu_dir)) {
|
||||
RyujinxDialog dialog(eden_dir, ryu_dir, this);
|
||||
dialog.exec();
|
||||
}
|
||||
}
|
||||
|
||||
void GMainWindow::OnMenuLoadFile() {
|
||||
if (is_load_file_select_active) {
|
||||
return;
|
||||
|
||||
@@ -358,6 +358,7 @@ private slots:
|
||||
void OnGameListAddDirectory();
|
||||
void OnGameListShowList(bool show);
|
||||
void OnGameListOpenPerGameProperties(const std::string& file);
|
||||
void OnLinkToRyujinx(const u64& program_id);
|
||||
void OnMenuLoadFile();
|
||||
void OnMenuLoadFolder();
|
||||
void IncrementInstallProgress();
|
||||
@@ -470,6 +471,8 @@ private:
|
||||
QMessageBox::StandardButtons(QMessageBox::Yes | QMessageBox::No),
|
||||
QMessageBox::StandardButton defaultButton = QMessageBox::NoButton);
|
||||
|
||||
std::string GetProfileID();
|
||||
|
||||
std::unique_ptr<Ui::MainWindow> ui;
|
||||
|
||||
std::unique_ptr<DiscordRPC::DiscordInterface> discord_rpc;
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
#include "migration_worker.h"
|
||||
#include "common/fs/symlink.h"
|
||||
|
||||
#include <QMap>
|
||||
#include <boost/algorithm/string/predicate.hpp>
|
||||
@@ -37,7 +38,7 @@ void MigrationWorker::process()
|
||||
try {
|
||||
fs::remove_all(eden_dir);
|
||||
} catch (fs::filesystem_error &_) {
|
||||
// ignore because linux does stupid crap sometimes.
|
||||
// ignore because linux does stupid crap sometimes
|
||||
}
|
||||
|
||||
switch (strategy) {
|
||||
@@ -46,7 +47,7 @@ void MigrationWorker::process()
|
||||
|
||||
// Windows 11 has random permission nonsense to deal with.
|
||||
try {
|
||||
fs::create_directory_symlink(legacy_user_dir, eden_dir);
|
||||
Common::FS::CreateSymlink(legacy_user_dir, eden_dir);
|
||||
} catch (const fs::filesystem_error &e) {
|
||||
emit error(tr("Linking the old directory failed. You may need to re-run with "
|
||||
"administrative privileges on Windows.\nOS gave error: %1")
|
||||
@@ -58,11 +59,11 @@ void MigrationWorker::process()
|
||||
// are already children of the root directory
|
||||
#ifndef WIN32
|
||||
if (fs::is_directory(legacy_config_dir)) {
|
||||
fs::create_directory_symlink(legacy_config_dir, config_dir);
|
||||
Common::FS::CreateSymlink(legacy_config_dir, config_dir);
|
||||
}
|
||||
|
||||
if (fs::is_directory(legacy_cache_dir)) {
|
||||
fs::create_directory_symlink(legacy_cache_dir, cache_dir);
|
||||
Common::FS::CreateSymlink(legacy_cache_dir, cache_dir);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
@@ -12,9 +12,9 @@ using namespace Common::FS;
|
||||
typedef struct Emulator {
|
||||
const char *m_name;
|
||||
|
||||
LegacyPath e_user_dir;
|
||||
LegacyPath e_config_dir;
|
||||
LegacyPath e_cache_dir;
|
||||
EmuPath e_user_dir;
|
||||
EmuPath e_config_dir;
|
||||
EmuPath e_cache_dir;
|
||||
|
||||
const std::string get_user_dir() const {
|
||||
return Common::FS::GetLegacyPath(e_user_dir).string();
|
||||
|
||||
40
src/yuzu/ryujinx_dialog.cpp
Normal file
@@ -0,0 +1,40 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#include "ryujinx_dialog.h"
#include "qt_common/util/fs.h"
#include "ui_ryujinx_dialog.h"
#include <filesystem>

namespace fs = std::filesystem;

RyujinxDialog::RyujinxDialog(std::filesystem::path eden_path,
                             std::filesystem::path ryu_path,
                             QWidget *parent)
    : QDialog(parent)
    , ui(new Ui::RyujinxDialog)
    , m_eden(eden_path.make_preferred())
    , m_ryu(ryu_path.make_preferred())
{
    ui->setupUi(this);

    connect(ui->eden, &QPushButton::clicked, this, &RyujinxDialog::fromEden);
    connect(ui->ryujinx, &QPushButton::clicked, this, &RyujinxDialog::fromRyujinx);
}

RyujinxDialog::~RyujinxDialog()
{
    delete ui;
}

void RyujinxDialog::fromEden()
{
    accept();
    QtCommon::FS::LinkRyujinx(m_eden, m_ryu);
}

void RyujinxDialog::fromRyujinx()
{
    accept();
    QtCommon::FS::LinkRyujinx(m_ryu, m_eden);
}
32
src/yuzu/ryujinx_dialog.h
Normal file
@@ -0,0 +1,32 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef RYUJINX_DIALOG_H
#define RYUJINX_DIALOG_H

#include <QDialog>
#include <filesystem>

namespace Ui {
class RyujinxDialog;
}

class RyujinxDialog : public QDialog
{
    Q_OBJECT

public:
    explicit RyujinxDialog(std::filesystem::path eden_path, std::filesystem::path ryu_path, QWidget *parent = nullptr);
    ~RyujinxDialog();

private slots:
    void fromEden();
    void fromRyujinx();

private:
    Ui::RyujinxDialog *ui;
    std::filesystem::path m_eden;
    std::filesystem::path m_ryu;
};

#endif // RYUJINX_DIALOG_H
81
src/yuzu/ryujinx_dialog.ui
Normal file
@@ -0,0 +1,81 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<ui version="4.0">
|
||||
<class>RyujinxDialog</class>
|
||||
<widget class="QDialog" name="RyujinxDialog">
|
||||
<property name="geometry">
|
||||
<rect>
|
||||
<x>0</x>
|
||||
<y>0</y>
|
||||
<width>404</width>
|
||||
<height>170</height>
|
||||
</rect>
|
||||
</property>
|
||||
<property name="windowTitle">
|
||||
<string>Ryujinx Link</string>
|
||||
</property>
|
||||
<layout class="QVBoxLayout" name="verticalLayout">
|
||||
<item>
|
||||
<widget class="QLabel" name="label">
|
||||
<property name="sizePolicy">
|
||||
<sizepolicy hsizetype="Preferred" vsizetype="Expanding">
|
||||
<horstretch>0</horstretch>
|
||||
<verstretch>0</verstretch>
|
||||
</sizepolicy>
|
||||
</property>
|
||||
<property name="text">
|
||||
<string>Linking save data to Ryujinx lets both Ryujinx and Eden reference the same save files for your games.
|
||||
|
||||
By selecting "From Eden", previous save data stored in Ryujinx will be deleted, and vice versa for "From Ryujinx".</string>
|
||||
</property>
|
||||
<property name="wordWrap">
|
||||
<bool>true</bool>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item>
|
||||
<layout class="QHBoxLayout" name="horizontalLayout">
|
||||
<item>
|
||||
<widget class="QPushButton" name="eden">
|
||||
<property name="text">
|
||||
<string>From Eden</string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item>
|
||||
<widget class="QPushButton" name="ryujinx">
|
||||
<property name="text">
|
||||
<string>From Ryujinx</string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item>
|
||||
<widget class="QPushButton" name="cancel">
|
||||
<property name="text">
|
||||
<string>Cancel</string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
</layout>
|
||||
</item>
|
||||
</layout>
|
||||
</widget>
|
||||
<resources/>
|
||||
<connections>
|
||||
<connection>
|
||||
<sender>cancel</sender>
|
||||
<signal>clicked()</signal>
|
||||
<receiver>RyujinxDialog</receiver>
|
||||
<slot>reject()</slot>
|
||||
<hints>
|
||||
<hint type="sourcelabel">
|
||||
<x>331</x>
|
||||
<y>147</y>
|
||||
</hint>
|
||||
<hint type="destinationlabel">
|
||||
<x>201</x>
|
||||
<y>84</y>
|
||||
</hint>
|
||||
</hints>
|
||||
</connection>
|
||||
</connections>
|