New upstream version 0-1330+ds

Commit 9dd4c3ae86 (parent 560091b3f4) by Andrea Pappacoda, 2023-02-06 00:23:59 +01:00
280 changed files with 22572 additions and 22738 deletions


@@ -10,7 +10,7 @@ if grep -nrI '\s$' src *.yml *.txt *.md Doxyfile .gitignore .gitmodules .ci* dis
fi
# Default clang-format points to default 3.5 version one
CLANG_FORMAT=${CLANG_FORMAT:-clang-format-12}
CLANG_FORMAT=${CLANG_FORMAT:-clang-format-15}
$CLANG_FORMAT --version
if [ "$TRAVIS_EVENT_TYPE" = "pull_request" ]; then


@@ -2,15 +2,12 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Download all pull requests as patches that match a specific label
# Usage: python download-patches-by-label.py <Label to Match> <Root Path Folder to DL to>
# Usage: python apply-patches-by-label.py <Label to Match>
import requests, sys, json, urllib3.request, shutil, subprocess, os, traceback
import json, requests, subprocess, sys, traceback
tagline = sys.argv[2]
http = urllib3.PoolManager()
dl_list = {}
def check_individual(labels):
for label in labels:
if (label["name"] == sys.argv[1]):
@@ -18,8 +15,9 @@ def check_individual(labels):
return False
def do_page(page):
url = 'https://api.github.com/repos/yuzu-emu/yuzu/pulls?page=%s' % page
url = f"https://api.github.com/repos/yuzu-emu/yuzu/pulls?page={page}"
response = requests.get(url)
response.raise_for_status()
if (response.ok):
j = json.loads(response.content)
if j == []:
@@ -27,13 +25,13 @@ def do_page(page):
for pr in j:
if (check_individual(pr["labels"])):
pn = pr["number"]
print("Matched PR# %s" % pn)
print(subprocess.check_output(["git", "fetch", "https://github.com/yuzu-emu/yuzu.git", "pull/%s/head:pr-%s" % (pn, pn), "-f", "--no-recurse-submodules"]))
print(subprocess.check_output(["git", "merge", "--squash", "pr-%s" % pn]))
print(subprocess.check_output(["git", "commit", "-m\"Merge %s PR %s\"" % (tagline, pn)]))
print(f"Matched PR# {pn}")
print(subprocess.check_output(["git", "fetch", "https://github.com/yuzu-emu/yuzu.git", f"pull/{pn}/head:pr-{pn}", "-f", "--no-recurse-submodules"]))
print(subprocess.check_output(["git", "merge", "--squash", f"pr-{pn}"]))
print(subprocess.check_output(["git", "commit", f"-m\"Merge {tagline} PR {pn}\""]))
try:
for i in range(1,30):
for i in range(1,10):
do_page(i)
except:
traceback.print_exc(file=sys.stdout)


@@ -208,6 +208,7 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/bin)
find_package(enet 1.3 MODULE)
find_package(fmt 9 REQUIRED)
find_package(inih MODULE)
find_package(LLVM MODULE)
find_package(lz4 REQUIRED)
find_package(nlohmann_json 3.8 REQUIRED)
find_package(Opus 1.3 MODULE)
@@ -513,7 +514,7 @@ endif()
# against all the src files. This should be used before making a pull request.
# =======================================================================
set(CLANG_FORMAT_POSTFIX "-12")
set(CLANG_FORMAT_POSTFIX "-15")
find_program(CLANG_FORMAT
NAMES clang-format${CLANG_FORMAT_POSTFIX}
clang-format


@@ -0,0 +1,16 @@
# SPDX-FileCopyrightText: 2023 Alexandre Bouvier <contact@amb.tf>
#
# SPDX-License-Identifier: GPL-3.0-or-later
find_package(LLVM QUIET CONFIG)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LLVM CONFIG_MODE)
if (LLVM_FOUND AND NOT TARGET LLVM::Demangle)
add_library(LLVM::Demangle INTERFACE IMPORTED)
llvm_map_components_to_libnames(LLVM_LIBRARIES demangle)
target_compile_definitions(LLVM::Demangle INTERFACE ${LLVM_DEFINITIONS})
target_include_directories(LLVM::Demangle INTERFACE ${LLVM_INCLUDE_DIRS})
target_link_libraries(LLVM::Demangle INTERFACE ${LLVM_LIBRARIES})
endif()

The following vendored translation files were updated; each diff is suppressed because it is too large:

dist/languages/ca.ts (1182 changed lines)
dist/languages/cs.ts (1184 changed lines)
dist/languages/da.ts (1180 changed lines)
dist/languages/de.ts (1234 changed lines)
dist/languages/el.ts (1188 changed lines)
dist/languages/es.ts (1309 changed lines)
dist/languages/fr.ts (1186 changed lines)
dist/languages/id.ts (1182 changed lines)
dist/languages/it.ts (1186 changed lines)
dist/languages/ja_JP.ts (1186 changed lines)
dist/languages/ko_KR.ts (1255 changed lines)
dist/languages/nb.ts (1182 changed lines)
dist/languages/nl.ts (1196 changed lines)
dist/languages/pl.ts (1575 changed lines)
dist/languages/pt_BR.ts (1192 changed lines)
dist/languages/pt_PT.ts (1192 changed lines)
dist/languages/ru_RU.ts (1268 changed lines)
dist/languages/sv.ts (1196 changed lines)
dist/languages/tr_TR.ts (1186 changed lines)
dist/languages/uk.ts (1236 changed lines)
dist/languages/zh_CN.ts (1254 changed lines)
dist/languages/zh_TW.ts (1186 changed lines)


@@ -158,6 +158,9 @@ if (YUZU_USE_EXTERNAL_VULKAN_HEADERS)
add_subdirectory(Vulkan-Headers EXCLUDE_FROM_ALL)
endif()
add_library(demangle STATIC)
target_include_directories(demangle PUBLIC ./demangle)
target_sources(demangle PRIVATE demangle/ItaniumDemangle.cpp)
if (NOT TARGET LLVM::Demangle)
add_library(demangle STATIC)
target_include_directories(demangle PUBLIC ./demangle)
target_sources(demangle PRIVATE demangle/ItaniumDemangle.cpp)
add_library(LLVM::Demangle ALIAS demangle)
endif()


@@ -1,104 +0,0 @@
//===--- Demangle.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-FileCopyrightText: Part of the LLVM Project
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEMANGLE_DEMANGLE_H
#define LLVM_DEMANGLE_DEMANGLE_H
#include <cstddef>
#include <string>
namespace llvm {
/// This is a llvm local version of __cxa_demangle. Other than the name and
/// being in the llvm namespace it is identical.
///
/// The mangled_name is demangled into buf and returned. If the buffer is not
/// large enough, realloc is used to expand it.
///
/// The *status will be set to a value from the following enumeration
enum : int {
demangle_unknown_error = -4,
demangle_invalid_args = -3,
demangle_invalid_mangled_name = -2,
demangle_memory_alloc_failure = -1,
demangle_success = 0,
};
char *itaniumDemangle(const char *mangled_name, char *buf, size_t *n,
int *status);
enum MSDemangleFlags {
MSDF_None = 0,
MSDF_DumpBackrefs = 1 << 0,
MSDF_NoAccessSpecifier = 1 << 1,
MSDF_NoCallingConvention = 1 << 2,
MSDF_NoReturnType = 1 << 3,
MSDF_NoMemberType = 1 << 4,
};
char *microsoftDemangle(const char *mangled_name, char *buf, size_t *n,
int *status, MSDemangleFlags Flags = MSDF_None);
/// "Partial" demangler. This supports demangling a string into an AST
/// (typically an intermediate stage in itaniumDemangle) and querying certain
/// properties or partially printing the demangled name.
struct ItaniumPartialDemangler {
ItaniumPartialDemangler();
ItaniumPartialDemangler(ItaniumPartialDemangler &&Other);
ItaniumPartialDemangler &operator=(ItaniumPartialDemangler &&Other);
/// Demangle into an AST. Subsequent calls to the rest of the member functions
/// implicitly operate on the AST this produces.
/// \return true on error, false otherwise
bool partialDemangle(const char *MangledName);
/// Just print the entire mangled name into Buf. Buf and N behave like the
/// second and third parameters to itaniumDemangle.
char *finishDemangle(char *Buf, size_t *N) const;
/// Get the base name of a function. This doesn't include trailing template
/// arguments, ie for "a::b<int>" this function returns "b".
char *getFunctionBaseName(char *Buf, size_t *N) const;
/// Get the context name for a function. For "a::b::c", this function returns
/// "a::b".
char *getFunctionDeclContextName(char *Buf, size_t *N) const;
/// Get the entire name of this function.
char *getFunctionName(char *Buf, size_t *N) const;
/// Get the parameters for this function.
char *getFunctionParameters(char *Buf, size_t *N) const;
char *getFunctionReturnType(char *Buf, size_t *N) const;
/// If this function has any cv or reference qualifiers. These imply that
/// the function is a non-static member function.
bool hasFunctionQualifiers() const;
/// If this symbol describes a constructor or destructor.
bool isCtorOrDtor() const;
/// If this symbol describes a function.
bool isFunction() const;
/// If this symbol describes a variable.
bool isData() const;
/// If this symbol is a <special-name>. These are generally implicitly
/// generated by the implementation, such as vtables and typeinfo names.
bool isSpecialName() const;
~ItaniumPartialDemangler();
private:
void *RootNode;
void *Context;
};
} // namespace llvm
#endif
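For reference, a minimal sketch (not part of the commit) of how a caller uses the itaniumDemangle entry point declared above, using the pre-LLVM-16 signature shown here; with buf == nullptr the demangler allocates the result buffer itself:

#include <cstdio>
#include <cstdlib>
#include <llvm/Demangle/Demangle.h>

int main() {
    int status = 0;
    // buf == nullptr and n == nullptr: the demangler mallocs a result buffer.
    char* demangled =
        llvm::itaniumDemangle("_ZN4llvm15itaniumDemangleEv", nullptr, nullptr, &status);
    if (status == llvm::demangle_success) {
        std::puts(demangled); // prints: llvm::itaniumDemangle()
    }
    std::free(demangled);
    return 0;
}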


@@ -1,93 +0,0 @@
//===--- DemangleConfig.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-FileCopyrightText: Part of the LLVM Project
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a variety of feature test macros copied from
// include/llvm/Support/Compiler.h so that LLVMDemangle does not need to take
// a dependency on LLVMSupport.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEMANGLE_COMPILER_H
#define LLVM_DEMANGLE_COMPILER_H
#ifndef __has_feature
#define __has_feature(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
#ifndef DEMANGLE_GNUC_PREREQ
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#define DEMANGLE_GNUC_PREREQ(maj, min, patch) \
((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) + __GNUC_PATCHLEVEL__ >= \
((maj) << 20) + ((min) << 10) + (patch))
#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
#define DEMANGLE_GNUC_PREREQ(maj, min, patch) \
((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) >= ((maj) << 20) + ((min) << 10))
#else
#define DEMANGLE_GNUC_PREREQ(maj, min, patch) 0
#endif
#endif
#if __has_attribute(used) || DEMANGLE_GNUC_PREREQ(3, 1, 0)
#define DEMANGLE_ATTRIBUTE_USED __attribute__((__used__))
#else
#define DEMANGLE_ATTRIBUTE_USED
#endif
#if __has_builtin(__builtin_unreachable) || DEMANGLE_GNUC_PREREQ(4, 5, 0)
#define DEMANGLE_UNREACHABLE __builtin_unreachable()
#elif defined(_MSC_VER)
#define DEMANGLE_UNREACHABLE __assume(false)
#else
#define DEMANGLE_UNREACHABLE
#endif
#if __has_attribute(noinline) || DEMANGLE_GNUC_PREREQ(3, 4, 0)
#define DEMANGLE_ATTRIBUTE_NOINLINE __attribute__((noinline))
#elif defined(_MSC_VER)
#define DEMANGLE_ATTRIBUTE_NOINLINE __declspec(noinline)
#else
#define DEMANGLE_ATTRIBUTE_NOINLINE
#endif
#if !defined(NDEBUG)
#define DEMANGLE_DUMP_METHOD DEMANGLE_ATTRIBUTE_NOINLINE DEMANGLE_ATTRIBUTE_USED
#else
#define DEMANGLE_DUMP_METHOD DEMANGLE_ATTRIBUTE_NOINLINE
#endif
#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
#define DEMANGLE_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define DEMANGLE_FALLTHROUGH [[gnu::fallthrough]]
#elif !__cplusplus
// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious
// error when __has_cpp_attribute is given a scoped attribute in C mode.
#define DEMANGLE_FALLTHROUGH
#elif __has_cpp_attribute(clang::fallthrough)
#define DEMANGLE_FALLTHROUGH [[clang::fallthrough]]
#else
#define DEMANGLE_FALLTHROUGH
#endif
#define DEMANGLE_NAMESPACE_BEGIN namespace llvm { namespace itanium_demangle {
#define DEMANGLE_NAMESPACE_END } }
#endif
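As an illustration (hypothetical, not from the commit), DEMANGLE_FALLTHROUGH is meant to sit between case labels, where it expands to [[fallthrough]] on compilers that support it:

// Hypothetical usage sketch for DEMANGLE_FALLTHROUGH.
int Classify(char c) {
    switch (c) {
    case 'K':
        // Handle 'K', then deliberately fall through to the 'V' case.
        DEMANGLE_FALLTHROUGH;
    case 'V':
        return 1;
    default:
        return 0;
    }
}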


@@ -1,588 +0,0 @@
//===------------------------- ItaniumDemangle.cpp ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-FileCopyrightText: Part of the LLVM Project
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// FIXME: (possibly) incomplete list of features that clang mangles that this
// file does not yet support:
// - C++ modules TS
#include "Demangle.h"
#include "ItaniumDemangle.h"
#include <cassert>
#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <numeric>
#include <utility>
#include <vector>
using namespace llvm;
using namespace llvm::itanium_demangle;
constexpr const char *itanium_demangle::FloatData<float>::spec;
constexpr const char *itanium_demangle::FloatData<double>::spec;
constexpr const char *itanium_demangle::FloatData<long double>::spec;
// <discriminator> := _ <non-negative number> # when number < 10
// := __ <non-negative number> _ # when number >= 10
// extension := decimal-digit+ # at the end of string
const char *itanium_demangle::parse_discriminator(const char *first,
const char *last) {
// parse but ignore discriminator
if (first != last) {
if (*first == '_') {
const char *t1 = first + 1;
if (t1 != last) {
if (std::isdigit(*t1))
first = t1 + 1;
else if (*t1 == '_') {
for (++t1; t1 != last && std::isdigit(*t1); ++t1)
;
if (t1 != last && *t1 == '_')
first = t1 + 1;
}
}
} else if (std::isdigit(*first)) {
const char *t1 = first + 1;
for (; t1 != last && std::isdigit(*t1); ++t1)
;
if (t1 == last)
first = last;
}
}
return first;
}
#ifndef NDEBUG
namespace {
struct DumpVisitor {
unsigned Depth = 0;
bool PendingNewline = false;
template<typename NodeT> static constexpr bool wantsNewline(const NodeT *) {
return true;
}
static bool wantsNewline(NodeArray A) { return !A.empty(); }
static constexpr bool wantsNewline(...) { return false; }
template<typename ...Ts> static bool anyWantNewline(Ts ...Vs) {
for (bool B : {wantsNewline(Vs)...})
if (B)
return true;
return false;
}
void printStr(const char *S) { fprintf(stderr, "%s", S); }
void print(StringView SV) {
fprintf(stderr, "\"%.*s\"", (int)SV.size(), SV.begin());
}
void print(const Node *N) {
if (N)
N->visit(std::ref(*this));
else
printStr("<null>");
}
void print(NodeOrString NS) {
if (NS.isNode())
print(NS.asNode());
else if (NS.isString())
print(NS.asString());
else
printStr("NodeOrString()");
}
void print(NodeArray A) {
++Depth;
printStr("{");
bool First = true;
for (const Node *N : A) {
if (First)
print(N);
else
printWithComma(N);
First = false;
}
printStr("}");
--Depth;
}
// Overload used when T is exactly 'bool', not merely convertible to 'bool'.
void print(bool B) { printStr(B ? "true" : "false"); }
template <class T>
typename std::enable_if<std::is_unsigned<T>::value>::type print(T N) {
fprintf(stderr, "%llu", (unsigned long long)N);
}
template <class T>
typename std::enable_if<std::is_signed<T>::value>::type print(T N) {
fprintf(stderr, "%lld", (long long)N);
}
void print(ReferenceKind RK) {
switch (RK) {
case ReferenceKind::LValue:
return printStr("ReferenceKind::LValue");
case ReferenceKind::RValue:
return printStr("ReferenceKind::RValue");
}
}
void print(FunctionRefQual RQ) {
switch (RQ) {
case FunctionRefQual::FrefQualNone:
return printStr("FunctionRefQual::FrefQualNone");
case FunctionRefQual::FrefQualLValue:
return printStr("FunctionRefQual::FrefQualLValue");
case FunctionRefQual::FrefQualRValue:
return printStr("FunctionRefQual::FrefQualRValue");
}
}
void print(Qualifiers Qs) {
if (!Qs) return printStr("QualNone");
struct QualName { Qualifiers Q; const char *Name; } Names[] = {
{QualConst, "QualConst"},
{QualVolatile, "QualVolatile"},
{QualRestrict, "QualRestrict"},
};
for (QualName Name : Names) {
if (Qs & Name.Q) {
printStr(Name.Name);
Qs = Qualifiers(Qs & ~Name.Q);
if (Qs) printStr(" | ");
}
}
}
void print(SpecialSubKind SSK) {
switch (SSK) {
case SpecialSubKind::allocator:
return printStr("SpecialSubKind::allocator");
case SpecialSubKind::basic_string:
return printStr("SpecialSubKind::basic_string");
case SpecialSubKind::string:
return printStr("SpecialSubKind::string");
case SpecialSubKind::istream:
return printStr("SpecialSubKind::istream");
case SpecialSubKind::ostream:
return printStr("SpecialSubKind::ostream");
case SpecialSubKind::iostream:
return printStr("SpecialSubKind::iostream");
}
}
void print(TemplateParamKind TPK) {
switch (TPK) {
case TemplateParamKind::Type:
return printStr("TemplateParamKind::Type");
case TemplateParamKind::NonType:
return printStr("TemplateParamKind::NonType");
case TemplateParamKind::Template:
return printStr("TemplateParamKind::Template");
}
}
void newLine() {
printStr("\n");
for (unsigned I = 0; I != Depth; ++I)
printStr(" ");
PendingNewline = false;
}
template<typename T> void printWithPendingNewline(T V) {
print(V);
if (wantsNewline(V))
PendingNewline = true;
}
template<typename T> void printWithComma(T V) {
if (PendingNewline || wantsNewline(V)) {
printStr(",");
newLine();
} else {
printStr(", ");
}
printWithPendingNewline(V);
}
struct CtorArgPrinter {
DumpVisitor &Visitor;
template<typename T, typename ...Rest> void operator()(T V, Rest ...Vs) {
if (Visitor.anyWantNewline(V, Vs...))
Visitor.newLine();
Visitor.printWithPendingNewline(V);
int PrintInOrder[] = { (Visitor.printWithComma(Vs), 0)..., 0 };
(void)PrintInOrder;
}
};
template<typename NodeT> void operator()(const NodeT *Node) {
Depth += 2;
fprintf(stderr, "%s(", itanium_demangle::NodeKind<NodeT>::name());
Node->match(CtorArgPrinter{*this});
fprintf(stderr, ")");
Depth -= 2;
}
void operator()(const ForwardTemplateReference *Node) {
Depth += 2;
fprintf(stderr, "ForwardTemplateReference(");
if (Node->Ref && !Node->Printing) {
Node->Printing = true;
CtorArgPrinter{*this}(Node->Ref);
Node->Printing = false;
} else {
CtorArgPrinter{*this}(Node->Index);
}
fprintf(stderr, ")");
Depth -= 2;
}
};
}
void itanium_demangle::Node::dump() const {
DumpVisitor V;
visit(std::ref(V));
V.newLine();
}
#endif
namespace {
class BumpPointerAllocator {
struct BlockMeta {
BlockMeta* Next;
size_t Current;
};
static constexpr size_t AllocSize = 4096;
static constexpr size_t UsableAllocSize = AllocSize - sizeof(BlockMeta);
alignas(long double) char InitialBuffer[AllocSize];
BlockMeta* BlockList = nullptr;
void grow() {
char* NewMeta = static_cast<char *>(std::malloc(AllocSize));
if (NewMeta == nullptr)
std::terminate();
BlockList = new (NewMeta) BlockMeta{BlockList, 0};
}
void* allocateMassive(size_t NBytes) {
NBytes += sizeof(BlockMeta);
BlockMeta* NewMeta = reinterpret_cast<BlockMeta*>(std::malloc(NBytes));
if (NewMeta == nullptr)
std::terminate();
BlockList->Next = new (NewMeta) BlockMeta{BlockList->Next, 0};
return static_cast<void*>(NewMeta + 1);
}
public:
BumpPointerAllocator()
: BlockList(new (InitialBuffer) BlockMeta{nullptr, 0}) {}
void* allocate(size_t N) {
N = (N + 15u) & ~15u;
if (N + BlockList->Current >= UsableAllocSize) {
if (N > UsableAllocSize)
return allocateMassive(N);
grow();
}
BlockList->Current += N;
return static_cast<void*>(reinterpret_cast<char*>(BlockList + 1) +
BlockList->Current - N);
}
void reset() {
while (BlockList) {
BlockMeta* Tmp = BlockList;
BlockList = BlockList->Next;
if (reinterpret_cast<char*>(Tmp) != InitialBuffer)
std::free(Tmp);
}
BlockList = new (InitialBuffer) BlockMeta{nullptr, 0};
}
~BumpPointerAllocator() { reset(); }
};
class DefaultAllocator {
BumpPointerAllocator Alloc;
public:
void reset() { Alloc.reset(); }
template<typename T, typename ...Args> T *makeNode(Args &&...args) {
return new (Alloc.allocate(sizeof(T)))
T(std::forward<Args>(args)...);
}
void *allocateNodeArray(size_t sz) {
return Alloc.allocate(sizeof(Node *) * sz);
}
};
} // unnamed namespace
//===----------------------------------------------------------------------===//
// Code beyond this point should not be synchronized with libc++abi.
//===----------------------------------------------------------------------===//
using Demangler = itanium_demangle::ManglingParser<DefaultAllocator>;
char *llvm::itaniumDemangle(const char *MangledName, char *Buf,
size_t *N, int *Status) {
if (MangledName == nullptr || (Buf != nullptr && N == nullptr)) {
if (Status)
*Status = demangle_invalid_args;
return nullptr;
}
int InternalStatus = demangle_success;
Demangler Parser(MangledName, MangledName + std::strlen(MangledName));
OutputStream S;
Node *AST = Parser.parse();
if (AST == nullptr)
InternalStatus = demangle_invalid_mangled_name;
else if (!initializeOutputStream(Buf, N, S, 1024))
InternalStatus = demangle_memory_alloc_failure;
else {
assert(Parser.ForwardTemplateRefs.empty());
AST->print(S);
S += '\0';
if (N != nullptr)
*N = S.getCurrentPosition();
Buf = S.getBuffer();
}
if (Status)
*Status = InternalStatus;
return InternalStatus == demangle_success ? Buf : nullptr;
}
ItaniumPartialDemangler::ItaniumPartialDemangler()
: RootNode(nullptr), Context(new Demangler{nullptr, nullptr}) {}
ItaniumPartialDemangler::~ItaniumPartialDemangler() {
delete static_cast<Demangler *>(Context);
}
ItaniumPartialDemangler::ItaniumPartialDemangler(
ItaniumPartialDemangler &&Other)
: RootNode(Other.RootNode), Context(Other.Context) {
Other.Context = Other.RootNode = nullptr;
}
ItaniumPartialDemangler &ItaniumPartialDemangler::
operator=(ItaniumPartialDemangler &&Other) {
std::swap(RootNode, Other.RootNode);
std::swap(Context, Other.Context);
return *this;
}
// Demangle MangledName into an AST, storing it into this->RootNode.
bool ItaniumPartialDemangler::partialDemangle(const char *MangledName) {
Demangler *Parser = static_cast<Demangler *>(Context);
size_t Len = std::strlen(MangledName);
Parser->reset(MangledName, MangledName + Len);
RootNode = Parser->parse();
return RootNode == nullptr;
}
static char *printNode(const Node *RootNode, char *Buf, size_t *N) {
OutputStream S;
if (!initializeOutputStream(Buf, N, S, 128))
return nullptr;
RootNode->print(S);
S += '\0';
if (N != nullptr)
*N = S.getCurrentPosition();
return S.getBuffer();
}
char *ItaniumPartialDemangler::getFunctionBaseName(char *Buf, size_t *N) const {
if (!isFunction())
return nullptr;
const Node *Name = static_cast<const FunctionEncoding *>(RootNode)->getName();
while (true) {
switch (Name->getKind()) {
case Node::KAbiTagAttr:
Name = static_cast<const AbiTagAttr *>(Name)->Base;
continue;
case Node::KStdQualifiedName:
Name = static_cast<const StdQualifiedName *>(Name)->Child;
continue;
case Node::KNestedName:
Name = static_cast<const NestedName *>(Name)->Name;
continue;
case Node::KLocalName:
Name = static_cast<const LocalName *>(Name)->Entity;
continue;
case Node::KNameWithTemplateArgs:
Name = static_cast<const NameWithTemplateArgs *>(Name)->Name;
continue;
default:
return printNode(Name, Buf, N);
}
}
}
char *ItaniumPartialDemangler::getFunctionDeclContextName(char *Buf,
size_t *N) const {
if (!isFunction())
return nullptr;
const Node *Name = static_cast<const FunctionEncoding *>(RootNode)->getName();
OutputStream S;
if (!initializeOutputStream(Buf, N, S, 128))
return nullptr;
KeepGoingLocalFunction:
while (true) {
if (Name->getKind() == Node::KAbiTagAttr) {
Name = static_cast<const AbiTagAttr *>(Name)->Base;
continue;
}
if (Name->getKind() == Node::KNameWithTemplateArgs) {
Name = static_cast<const NameWithTemplateArgs *>(Name)->Name;
continue;
}
break;
}
switch (Name->getKind()) {
case Node::KStdQualifiedName:
S += "std";
break;
case Node::KNestedName:
static_cast<const NestedName *>(Name)->Qual->print(S);
break;
case Node::KLocalName: {
auto *LN = static_cast<const LocalName *>(Name);
LN->Encoding->print(S);
S += "::";
Name = LN->Entity;
goto KeepGoingLocalFunction;
}
default:
break;
}
S += '\0';
if (N != nullptr)
*N = S.getCurrentPosition();
return S.getBuffer();
}
char *ItaniumPartialDemangler::getFunctionName(char *Buf, size_t *N) const {
if (!isFunction())
return nullptr;
auto *Name = static_cast<FunctionEncoding *>(RootNode)->getName();
return printNode(Name, Buf, N);
}
char *ItaniumPartialDemangler::getFunctionParameters(char *Buf,
size_t *N) const {
if (!isFunction())
return nullptr;
NodeArray Params = static_cast<FunctionEncoding *>(RootNode)->getParams();
OutputStream S;
if (!initializeOutputStream(Buf, N, S, 128))
return nullptr;
S += '(';
Params.printWithComma(S);
S += ')';
S += '\0';
if (N != nullptr)
*N = S.getCurrentPosition();
return S.getBuffer();
}
char *ItaniumPartialDemangler::getFunctionReturnType(
char *Buf, size_t *N) const {
if (!isFunction())
return nullptr;
OutputStream S;
if (!initializeOutputStream(Buf, N, S, 128))
return nullptr;
if (const Node *Ret =
static_cast<const FunctionEncoding *>(RootNode)->getReturnType())
Ret->print(S);
S += '\0';
if (N != nullptr)
*N = S.getCurrentPosition();
return S.getBuffer();
}
char *ItaniumPartialDemangler::finishDemangle(char *Buf, size_t *N) const {
assert(RootNode != nullptr && "must call partialDemangle()");
return printNode(static_cast<Node *>(RootNode), Buf, N);
}
bool ItaniumPartialDemangler::hasFunctionQualifiers() const {
assert(RootNode != nullptr && "must call partialDemangle()");
if (!isFunction())
return false;
auto *E = static_cast<const FunctionEncoding *>(RootNode);
return E->getCVQuals() != QualNone || E->getRefQual() != FrefQualNone;
}
bool ItaniumPartialDemangler::isCtorOrDtor() const {
const Node *N = static_cast<const Node *>(RootNode);
while (N) {
switch (N->getKind()) {
default:
return false;
case Node::KCtorDtorName:
return true;
case Node::KAbiTagAttr:
N = static_cast<const AbiTagAttr *>(N)->Base;
break;
case Node::KFunctionEncoding:
N = static_cast<const FunctionEncoding *>(N)->getName();
break;
case Node::KLocalName:
N = static_cast<const LocalName *>(N)->Entity;
break;
case Node::KNameWithTemplateArgs:
N = static_cast<const NameWithTemplateArgs *>(N)->Name;
break;
case Node::KNestedName:
N = static_cast<const NestedName *>(N)->Name;
break;
case Node::KStdQualifiedName:
N = static_cast<const StdQualifiedName *>(N)->Child;
break;
}
}
return false;
}
bool ItaniumPartialDemangler::isFunction() const {
assert(RootNode != nullptr && "must call partialDemangle()");
return static_cast<const Node *>(RootNode)->getKind() ==
Node::KFunctionEncoding;
}
bool ItaniumPartialDemangler::isSpecialName() const {
assert(RootNode != nullptr && "must call partialDemangle()");
auto K = static_cast<const Node *>(RootNode)->getKind();
return K == Node::KSpecialName || K == Node::KCtorVtableSpecialName;
}
bool ItaniumPartialDemangler::isData() const {
return !isFunction() && !isSpecialName();
}
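A minimal sketch (not part of the commit) of the ItaniumPartialDemangler workflow implemented above; note that partialDemangle() returns true on error:

#include <cstdio>
#include <cstdlib>
#include <llvm/Demangle/Demangle.h>

int main() {
    llvm::ItaniumPartialDemangler D;
    if (!D.partialDemangle("_ZN1a1bIiEE1fEv")) { // false means success
        size_t n = 0;
        char* base = D.getFunctionBaseName(nullptr, &n);           // "f"
        char* context = D.getFunctionDeclContextName(nullptr, &n); // "a::b<int>"
        std::printf("%s in %s\n", base, context);
        std::free(base);
        std::free(context);
    }
    return 0;
}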

File diff suppressed because it is too large


@@ -1,127 +0,0 @@
//===--- StringView.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-FileCopyrightText: Part of the LLVM Project
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// FIXME: Use std::string_view instead when we support C++17.
//
//===----------------------------------------------------------------------===//
#ifndef DEMANGLE_STRINGVIEW_H
#define DEMANGLE_STRINGVIEW_H
#include "DemangleConfig.h"
#include <algorithm>
#include <cassert>
#include <cstring>
DEMANGLE_NAMESPACE_BEGIN
class StringView {
const char *First;
const char *Last;
public:
static const size_t npos = ~size_t(0);
template <size_t N>
StringView(const char (&Str)[N]) : First(Str), Last(Str + N - 1) {}
StringView(const char *First_, const char *Last_)
: First(First_), Last(Last_) {}
StringView(const char *First_, size_t Len)
: First(First_), Last(First_ + Len) {}
StringView(const char *Str) : First(Str), Last(Str + std::strlen(Str)) {}
StringView() : First(nullptr), Last(nullptr) {}
StringView substr(size_t From) const {
return StringView(begin() + From, size() - From);
}
size_t find(char C, size_t From = 0) const {
size_t FindBegin = std::min(From, size());
// Avoid calling memchr with nullptr.
if (FindBegin < size()) {
// Just forward to memchr, which is faster than a hand-rolled loop.
if (const void *P = ::memchr(First + FindBegin, C, size() - FindBegin))
return size_t(static_cast<const char *>(P) - First);
}
return npos;
}
StringView substr(size_t From, size_t To) const {
if (To >= size())
To = size() - 1;
if (From >= size())
From = size() - 1;
return StringView(First + From, First + To);
}
StringView dropFront(size_t N = 1) const {
if (N >= size())
N = size();
return StringView(First + N, Last);
}
StringView dropBack(size_t N = 1) const {
if (N >= size())
N = size();
return StringView(First, Last - N);
}
char front() const {
assert(!empty());
return *begin();
}
char back() const {
assert(!empty());
return *(end() - 1);
}
char popFront() {
assert(!empty());
return *First++;
}
bool consumeFront(char C) {
if (!startsWith(C))
return false;
*this = dropFront(1);
return true;
}
bool consumeFront(StringView S) {
if (!startsWith(S))
return false;
*this = dropFront(S.size());
return true;
}
bool startsWith(char C) const { return !empty() && *begin() == C; }
bool startsWith(StringView Str) const {
if (Str.size() > size())
return false;
return std::equal(Str.begin(), Str.end(), begin());
}
const char &operator[](size_t Idx) const { return *(begin() + Idx); }
const char *begin() const { return First; }
const char *end() const { return Last; }
size_t size() const { return static_cast<size_t>(Last - First); }
bool empty() const { return First == Last; }
};
inline bool operator==(const StringView &LHS, const StringView &RHS) {
return LHS.size() == RHS.size() &&
std::equal(LHS.begin(), LHS.end(), RHS.begin());
}
DEMANGLE_NAMESPACE_END
#endif
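Illustrative only (assuming the StringView above, which lives in the llvm::itanium_demangle namespace): the demangler's parsing idiom consumes prefixes in place:

#include <cstdio>

void Demo() {
    llvm::itanium_demangle::StringView SV("_Z3foov");
    if (SV.consumeFront("_Z")) {          // strip the Itanium mangling prefix
        const char First = SV.popFront(); // '3'
        std::printf("%c, rest: %.*s\n", First, (int)SV.size(), SV.begin());
    }
}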


@@ -1,192 +0,0 @@
//===--- Utility.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-FileCopyrightText: Part of the LLVM Project
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Provide some utility classes for use in the demangler(s).
//
//===----------------------------------------------------------------------===//
#ifndef DEMANGLE_UTILITY_H
#define DEMANGLE_UTILITY_H
#include "StringView.h"
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iterator>
#include <limits>
DEMANGLE_NAMESPACE_BEGIN
// Stream that AST nodes write their string representation into after the AST
// has been parsed.
class OutputStream {
char *Buffer;
size_t CurrentPosition;
size_t BufferCapacity;
// Ensure there are at least N more positions in the buffer.
void grow(size_t N) {
if (N + CurrentPosition >= BufferCapacity) {
BufferCapacity *= 2;
if (BufferCapacity < N + CurrentPosition)
BufferCapacity = N + CurrentPosition;
Buffer = static_cast<char *>(std::realloc(Buffer, BufferCapacity));
if (Buffer == nullptr)
std::terminate();
}
}
void writeUnsigned(uint64_t N, bool isNeg = false) {
// Handle special case...
if (N == 0) {
*this << '0';
return;
}
char Temp[21];
char *TempPtr = std::end(Temp);
while (N) {
*--TempPtr = '0' + char(N % 10);
N /= 10;
}
// Add negative sign...
if (isNeg)
*--TempPtr = '-';
this->operator<<(StringView(TempPtr, std::end(Temp)));
}
public:
OutputStream(char *StartBuf, size_t Size)
: Buffer(StartBuf), CurrentPosition(0), BufferCapacity(Size) {}
OutputStream() = default;
void reset(char *Buffer_, size_t BufferCapacity_) {
CurrentPosition = 0;
Buffer = Buffer_;
BufferCapacity = BufferCapacity_;
}
/// If a ParameterPackExpansion (or similar type) is encountered, the offset
/// into the pack that we're currently printing.
unsigned CurrentPackIndex = std::numeric_limits<unsigned>::max();
unsigned CurrentPackMax = std::numeric_limits<unsigned>::max();
OutputStream &operator+=(StringView R) {
size_t Size = R.size();
if (Size == 0)
return *this;
grow(Size);
std::memmove(Buffer + CurrentPosition, R.begin(), Size);
CurrentPosition += Size;
return *this;
}
OutputStream &operator+=(char C) {
grow(1);
Buffer[CurrentPosition++] = C;
return *this;
}
OutputStream &operator<<(StringView R) { return (*this += R); }
OutputStream &operator<<(char C) { return (*this += C); }
OutputStream &operator<<(long long N) {
if (N < 0)
writeUnsigned(static_cast<unsigned long long>(-N), true);
else
writeUnsigned(static_cast<unsigned long long>(N));
return *this;
}
OutputStream &operator<<(unsigned long long N) {
writeUnsigned(N, false);
return *this;
}
OutputStream &operator<<(long N) {
return this->operator<<(static_cast<long long>(N));
}
OutputStream &operator<<(unsigned long N) {
return this->operator<<(static_cast<unsigned long long>(N));
}
OutputStream &operator<<(int N) {
return this->operator<<(static_cast<long long>(N));
}
OutputStream &operator<<(unsigned int N) {
return this->operator<<(static_cast<unsigned long long>(N));
}
size_t getCurrentPosition() const { return CurrentPosition; }
void setCurrentPosition(size_t NewPos) { CurrentPosition = NewPos; }
char back() const {
return CurrentPosition ? Buffer[CurrentPosition - 1] : '\0';
}
bool empty() const { return CurrentPosition == 0; }
char *getBuffer() { return Buffer; }
char *getBufferEnd() { return Buffer + CurrentPosition - 1; }
size_t getBufferCapacity() { return BufferCapacity; }
};
template <class T> class SwapAndRestore {
T &Restore;
T OriginalValue;
bool ShouldRestore = true;
public:
SwapAndRestore(T &Restore_) : SwapAndRestore(Restore_, Restore_) {}
SwapAndRestore(T &Restore_, T NewVal)
: Restore(Restore_), OriginalValue(Restore) {
Restore = std::move(NewVal);
}
~SwapAndRestore() {
if (ShouldRestore)
Restore = std::move(OriginalValue);
}
void shouldRestore(bool ShouldRestore_) { ShouldRestore = ShouldRestore_; }
void restoreNow(bool Force) {
if (!Force && !ShouldRestore)
return;
Restore = std::move(OriginalValue);
ShouldRestore = false;
}
SwapAndRestore(const SwapAndRestore &) = delete;
SwapAndRestore &operator=(const SwapAndRestore &) = delete;
};
inline bool initializeOutputStream(char *Buf, size_t *N, OutputStream &S,
size_t InitSize) {
size_t BufferSize;
if (Buf == nullptr) {
Buf = static_cast<char *>(std::malloc(InitSize));
if (Buf == nullptr)
return false;
BufferSize = InitSize;
} else
BufferSize = *N;
S.reset(Buf, BufferSize);
return true;
}
DEMANGLE_NAMESPACE_END
#endif
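A minimal sketch (not part of the commit) of the OutputStream/initializeOutputStream pairing above; passing a null buffer makes the stream malloc one of InitSize bytes:

#include <cstdio>
#include <cstdlib>

void Demo() {
    llvm::itanium_demangle::OutputStream S;
    size_t N = 0;
    if (llvm::itanium_demangle::initializeOutputStream(nullptr, &N, S, 128)) {
        S << "vector<" << 42 << ">"; // grow() reallocates as needed
        S += '\0';
        std::puts(S.getBuffer()); // prints: vector<42>
        std::free(S.getBuffer());
    }
}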


@@ -177,7 +177,7 @@ endif()
create_target_directory_groups(common)
target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile Threads::Threads)
target_link_libraries(common PRIVATE lz4::lz4 zstd::zstd demangle)
target_link_libraries(common PRIVATE lz4::lz4 zstd::zstd LLVM::Demangle)
if (YUZU_USE_PRECOMPILED_HEADERS)
target_precompile_headers(common PRIVATE precompiled_headers.h)


@@ -12,7 +12,8 @@
namespace Common {
template <typename VaType, size_t AddressSpaceBits>
concept AddressSpaceValid = std::is_unsigned_v<VaType> && sizeof(VaType) * 8 >= AddressSpaceBits;
concept AddressSpaceValid = std::is_unsigned_v<VaType> && sizeof(VaType) * 8 >=
AddressSpaceBits;
struct EmptyStruct {};
@@ -21,7 +22,7 @@ struct EmptyStruct {};
*/
template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,
bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct>
requires AddressSpaceValid<VaType, AddressSpaceBits>
requires AddressSpaceValid<VaType, AddressSpaceBits>
class FlatAddressSpaceMap {
public:
/// The maximum VA that this AS can technically reach
@@ -109,7 +110,7 @@ private:
* initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block
*/
template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>
requires AddressSpaceValid<VaType, AddressSpaceBits>
requires AddressSpaceValid<VaType, AddressSpaceBits>
class FlatAllocator
: public FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits> {
private:


@@ -10,7 +10,7 @@
namespace Common {
template <typename T>
requires std::is_unsigned_v<T>
requires std::is_unsigned_v<T>
[[nodiscard]] constexpr T AlignUp(T value, size_t size) {
auto mod{static_cast<T>(value % size)};
value -= mod;
@@ -18,31 +18,31 @@ requires std::is_unsigned_v<T>
}
template <typename T>
requires std::is_unsigned_v<T>
requires std::is_unsigned_v<T>
[[nodiscard]] constexpr T AlignUpLog2(T value, size_t align_log2) {
return static_cast<T>((value + ((1ULL << align_log2) - 1)) >> align_log2 << align_log2);
}
template <typename T>
requires std::is_unsigned_v<T>
requires std::is_unsigned_v<T>
[[nodiscard]] constexpr T AlignDown(T value, size_t size) {
return static_cast<T>(value - value % size);
}
template <typename T>
requires std::is_unsigned_v<T>
requires std::is_unsigned_v<T>
[[nodiscard]] constexpr bool Is4KBAligned(T value) {
return (value & 0xFFF) == 0;
}
template <typename T>
requires std::is_unsigned_v<T>
requires std::is_unsigned_v<T>
[[nodiscard]] constexpr bool IsWordAligned(T value) {
return (value & 0b11) == 0;
}
template <typename T>
requires std::is_integral_v<T>
requires std::is_integral_v<T>
[[nodiscard]] constexpr bool IsAligned(T value, size_t alignment) {
using U = typename std::make_unsigned_t<T>;
const U mask = static_cast<U>(alignment - 1);
@@ -50,7 +50,7 @@ requires std::is_integral_v<T>
}
template <typename T, typename U>
requires std::is_integral_v<T>
requires std::is_integral_v<T>
[[nodiscard]] constexpr T DivideUp(T x, U y) {
return (x + (y - 1)) / y;
}
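For concreteness (not part of the diff), worked values for the helpers above; u64 is yuzu's unsigned 64-bit alias from common/common_types.h:

#include "common/alignment.h"
#include "common/common_types.h"

static_assert(Common::AlignUp<u64>(0x1234, 0x1000) == 0x2000);
static_assert(Common::AlignDown<u64>(0x1234, 0x1000) == 0x1000);
static_assert(Common::AlignUpLog2<u64>(0x1234, 12) == 0x2000);
static_assert(Common::Is4KBAligned<u64>(0x2000));
static_assert(Common::DivideUp(10, 4) == 3);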
@@ -73,11 +73,11 @@ public:
constexpr AlignmentAllocator(const AlignmentAllocator<T2, Align>&) noexcept {}
[[nodiscard]] T* allocate(size_type n) {
return static_cast<T*>(::operator new (n * sizeof(T), std::align_val_t{Align}));
return static_cast<T*>(::operator new(n * sizeof(T), std::align_val_t{Align}));
}
void deallocate(T* p, size_type n) {
::operator delete (p, n * sizeof(T), std::align_val_t{Align});
::operator delete(p, n * sizeof(T), std::align_val_t{Align});
}
template <typename T2>


@@ -75,7 +75,7 @@ extern "C" void AnnotateHappensAfter(const char*, int, void*);
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_FORCEINLINE __forceinline
#elif defined(AE_GCC)
//#define AE_FORCEINLINE __attribute__((always_inline))
// #define AE_FORCEINLINE __attribute__((always_inline))
#define AE_FORCEINLINE inline
#else
#define AE_FORCEINLINE inline


@@ -45,19 +45,19 @@ template <typename T>
}
template <typename T>
requires std::is_unsigned_v<T>
requires std::is_unsigned_v<T>
[[nodiscard]] constexpr bool IsPow2(T value) {
return std::has_single_bit(value);
}
template <typename T>
requires std::is_integral_v<T>
requires std::is_integral_v<T>
[[nodiscard]] T NextPow2(T value) {
return static_cast<T>(1ULL << ((8U * sizeof(T)) - std::countl_zero(value - 1U)));
}
template <size_t bit_index, typename T>
requires std::is_integral_v<T>
requires std::is_integral_v<T>
[[nodiscard]] constexpr bool Bit(const T value) {
static_assert(bit_index < BitSize<T>(), "bit_index must be smaller than size of T");
return ((value >> bit_index) & T(1)) == T(1);

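Again for concreteness (not part of the diff), sample values for the bit helpers above:

#include "common/bit_util.h"

static_assert(Common::IsPow2(64u));
static_assert(!Common::IsPow2(48u));
static_assert(Common::Bit<3>(0b1000)); // bit 3 of 0b1000 is set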

@@ -16,9 +16,9 @@ concept IsContiguousContainer = std::contiguous_iterator<typename T::iterator>;
// is available on all supported platforms.
template <typename Derived, typename Base>
concept DerivedFrom = requires {
std::is_base_of_v<Base, Derived>;
std::is_convertible_v<const volatile Derived*, const volatile Base*>;
};
std::is_base_of_v<Base, Derived>;
std::is_convertible_v<const volatile Derived*, const volatile Base*>;
};
// TODO: Replace with std::convertible_to when libc++ implements it.
template <typename From, typename To>


@@ -1,13 +1,11 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <llvm/Demangle/Demangle.h>
#include "common/demangle.h"
#include "common/scope_exit.h"
namespace llvm {
char* itaniumDemangle(const char* mangled_name, char* buf, size_t* n, int* status);
}
namespace Common {
std::string DemangleSymbol(const std::string& mangled) {

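A usage sketch (not part of the commit) for the wrapper shown above; the header path is taken from the include in the diff, and the exact output for invalid names is not shown here:

#include <cstdio>
#include <string>
#include "common/demangle.h"

int main() {
    const std::string pretty = Common::DemangleSymbol("_Z3foov");
    std::printf("%s\n", pretty.c_str()); // expected: foo()
    return 0;
}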

@@ -10,14 +10,14 @@ namespace Common {
/// Ceiled integer division.
template <typename N, typename D>
requires std::is_integral_v<N> && std::is_unsigned_v<D>
requires std::is_integral_v<N> && std::is_unsigned_v<D>
[[nodiscard]] constexpr N DivCeil(N number, D divisor) {
return static_cast<N>((static_cast<D>(number) + divisor - 1) / divisor);
}
/// Ceiled integer division with logarithmic divisor in base 2
template <typename N, typename D>
requires std::is_integral_v<N> && std::is_unsigned_v<D>
requires std::is_integral_v<N> && std::is_unsigned_v<D>
[[nodiscard]] constexpr N DivCeilLog2(N value, D alignment_log2) {
return static_cast<N>((static_cast<D>(value) + (D(1) << alignment_log2) - 1) >> alignment_log2);
}


@@ -64,7 +64,7 @@ struct no_init_t {
* Additionally, this requires E to be trivially destructible
*/
template <typename T, typename E, bool = std::is_trivially_destructible_v<T>>
requires std::is_trivially_destructible_v<E>
requires std::is_trivially_destructible_v<E>
struct expected_storage_base {
constexpr expected_storage_base() : m_val{T{}}, m_has_val{true} {}
@@ -111,7 +111,7 @@ struct expected_storage_base {
* Additionally, this requires E to be trivially destructible
*/
template <typename T, typename E>
requires std::is_trivially_destructible_v<E>
requires std::is_trivially_destructible_v<E>
struct expected_storage_base<T, E, true> {
constexpr expected_storage_base() : m_val{T{}}, m_has_val{true} {}
@@ -251,7 +251,7 @@ struct expected_operations_base : expected_storage_base<T, E> {
* Additionally, this requires E to be trivially copy constructible
*/
template <typename T, typename E, bool = std::is_trivially_copy_constructible_v<T>>
requires std::is_trivially_copy_constructible_v<E>
requires std::is_trivially_copy_constructible_v<E>
struct expected_copy_base : expected_operations_base<T, E> {
using expected_operations_base<T, E>::expected_operations_base;
};
@@ -261,7 +261,7 @@ struct expected_copy_base : expected_operations_base<T, E> {
* Additionally, this requires E to be trivially copy constructible
*/
template <typename T, typename E>
requires std::is_trivially_copy_constructible_v<E>
requires std::is_trivially_copy_constructible_v<E>
struct expected_copy_base<T, E, false> : expected_operations_base<T, E> {
using expected_operations_base<T, E>::expected_operations_base;
@@ -289,7 +289,7 @@ struct expected_copy_base<T, E, false> : expected_operations_base<T, E> {
* Additionally, this requires E to be trivially move constructible
*/
template <typename T, typename E, bool = std::is_trivially_move_constructible_v<T>>
requires std::is_trivially_move_constructible_v<E>
requires std::is_trivially_move_constructible_v<E>
struct expected_move_base : expected_copy_base<T, E> {
using expected_copy_base<T, E>::expected_copy_base;
};
@@ -299,7 +299,7 @@ struct expected_move_base : expected_copy_base<T, E> {
* Additionally, this requires E to be trivially move constructible
*/
template <typename T, typename E>
requires std::is_trivially_move_constructible_v<E>
requires std::is_trivially_move_constructible_v<E>
struct expected_move_base<T, E, false> : expected_copy_base<T, E> {
using expected_copy_base<T, E>::expected_copy_base;
@@ -330,9 +330,9 @@ template <typename T, typename E,
bool = std::conjunction_v<std::is_trivially_copy_assignable<T>,
std::is_trivially_copy_constructible<T>,
std::is_trivially_destructible<T>>>
requires std::conjunction_v<std::is_trivially_copy_assignable<E>,
std::is_trivially_copy_constructible<E>,
std::is_trivially_destructible<E>>
requires std::conjunction_v<std::is_trivially_copy_assignable<E>,
std::is_trivially_copy_constructible<E>,
std::is_trivially_destructible<E>>
struct expected_copy_assign_base : expected_move_base<T, E> {
using expected_move_base<T, E>::expected_move_base;
};
@@ -342,9 +342,9 @@ struct expected_copy_assign_base : expected_move_base<T, E> {
* Additionally, this requires E to be trivially copy assignable
*/
template <typename T, typename E>
requires std::conjunction_v<std::is_trivially_copy_assignable<E>,
std::is_trivially_copy_constructible<E>,
std::is_trivially_destructible<E>>
requires std::conjunction_v<std::is_trivially_copy_assignable<E>,
std::is_trivially_copy_constructible<E>,
std::is_trivially_destructible<E>>
struct expected_copy_assign_base<T, E, false> : expected_move_base<T, E> {
using expected_move_base<T, E>::expected_move_base;
@@ -371,9 +371,9 @@ template <typename T, typename E,
bool = std::conjunction_v<std::is_trivially_move_assignable<T>,
std::is_trivially_move_constructible<T>,
std::is_trivially_destructible<T>>>
requires std::conjunction_v<std::is_trivially_move_assignable<E>,
std::is_trivially_move_constructible<E>,
std::is_trivially_destructible<E>>
requires std::conjunction_v<std::is_trivially_move_assignable<E>,
std::is_trivially_move_constructible<E>,
std::is_trivially_destructible<E>>
struct expected_move_assign_base : expected_copy_assign_base<T, E> {
using expected_copy_assign_base<T, E>::expected_copy_assign_base;
};
@@ -383,9 +383,9 @@ struct expected_move_assign_base : expected_copy_assign_base<T, E> {
* Additionally, this requires E to be trivially move assignable
*/
template <typename T, typename E>
requires std::conjunction_v<std::is_trivially_move_assignable<E>,
std::is_trivially_move_constructible<E>,
std::is_trivially_destructible<E>>
requires std::conjunction_v<std::is_trivially_move_assignable<E>,
std::is_trivially_move_constructible<E>,
std::is_trivially_destructible<E>>
struct expected_move_assign_base<T, E, false> : expected_copy_assign_base<T, E> {
using expected_copy_assign_base<T, E>::expected_copy_assign_base;
@@ -412,7 +412,7 @@ struct expected_move_assign_base<T, E, false> : expected_copy_assign_base<T, E>
*/
template <typename T, typename E, bool EnableCopy = std::is_copy_constructible_v<T>,
bool EnableMove = std::is_move_constructible_v<T>>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>>
struct expected_delete_ctor_base {
expected_delete_ctor_base() = default;
expected_delete_ctor_base(const expected_delete_ctor_base&) = default;
@@ -422,7 +422,7 @@ struct expected_delete_ctor_base {
};
template <typename T, typename E>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>>
struct expected_delete_ctor_base<T, E, true, false> {
expected_delete_ctor_base() = default;
expected_delete_ctor_base(const expected_delete_ctor_base&) = default;
@@ -432,7 +432,7 @@ struct expected_delete_ctor_base<T, E, true, false> {
};
template <typename T, typename E>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>>
struct expected_delete_ctor_base<T, E, false, true> {
expected_delete_ctor_base() = default;
expected_delete_ctor_base(const expected_delete_ctor_base&) = delete;
@@ -442,7 +442,7 @@ struct expected_delete_ctor_base<T, E, false, true> {
};
template <typename T, typename E>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>>
struct expected_delete_ctor_base<T, E, false, false> {
expected_delete_ctor_base() = default;
expected_delete_ctor_base(const expected_delete_ctor_base&) = delete;
@@ -460,8 +460,8 @@ template <
typename T, typename E,
bool EnableCopy = std::conjunction_v<std::is_copy_constructible<T>, std::is_copy_assignable<T>>,
bool EnableMove = std::conjunction_v<std::is_move_constructible<T>, std::is_move_assignable<T>>>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>,
std::is_copy_assignable<E>, std::is_move_assignable<E>>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>,
std::is_copy_assignable<E>, std::is_move_assignable<E>>
struct expected_delete_assign_base {
expected_delete_assign_base() = default;
expected_delete_assign_base(const expected_delete_assign_base&) = default;
@@ -471,8 +471,8 @@ struct expected_delete_assign_base {
};
template <typename T, typename E>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>,
std::is_copy_assignable<E>, std::is_move_assignable<E>>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>,
std::is_copy_assignable<E>, std::is_move_assignable<E>>
struct expected_delete_assign_base<T, E, true, false> {
expected_delete_assign_base() = default;
expected_delete_assign_base(const expected_delete_assign_base&) = default;
@@ -482,8 +482,8 @@ struct expected_delete_assign_base<T, E, true, false> {
};
template <typename T, typename E>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>,
std::is_copy_assignable<E>, std::is_move_assignable<E>>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>,
std::is_copy_assignable<E>, std::is_move_assignable<E>>
struct expected_delete_assign_base<T, E, false, true> {
expected_delete_assign_base() = default;
expected_delete_assign_base(const expected_delete_assign_base&) = default;
@@ -493,8 +493,8 @@ struct expected_delete_assign_base<T, E, false, true> {
};
template <typename T, typename E>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>,
std::is_copy_assignable<E>, std::is_move_assignable<E>>
requires std::conjunction_v<std::is_copy_constructible<E>, std::is_move_constructible<E>,
std::is_copy_assignable<E>, std::is_move_assignable<E>>
struct expected_delete_assign_base<T, E, false, false> {
expected_delete_assign_base() = default;
expected_delete_assign_base(const expected_delete_assign_base&) = default;


@@ -130,6 +130,8 @@ struct ButtonStatus {
bool inverted{};
// Press once to activate, press again to release
bool toggle{};
// Spams the button when active
bool turbo{};
// Internal lock for the toggle status
bool locked{};
};


@@ -242,19 +242,21 @@ public:
template <typename T>
concept HasRedBlackKeyType = requires {
{ std::is_same<typename T::RedBlackKeyType, void>::value } -> std::convertible_to<bool>;
};
{
std::is_same<typename T::RedBlackKeyType, void>::value
} -> std::convertible_to<bool>;
};
namespace impl {
template <typename T, typename Default>
consteval auto* GetRedBlackKeyType() {
if constexpr (HasRedBlackKeyType<T>) {
return static_cast<typename T::RedBlackKeyType*>(nullptr);
} else {
return static_cast<Default*>(nullptr);
}
template <typename T, typename Default>
consteval auto* GetRedBlackKeyType() {
if constexpr (HasRedBlackKeyType<T>) {
return static_cast<typename T::RedBlackKeyType*>(nullptr);
} else {
return static_cast<Default*>(nullptr);
}
}
} // namespace impl


@@ -9,17 +9,19 @@
namespace Common {
template <class T>
requires(!std::is_array_v<T>) std::unique_ptr<T> make_unique_for_overwrite() {
requires(!std::is_array_v<T>)
std::unique_ptr<T> make_unique_for_overwrite() {
return std::unique_ptr<T>(new T);
}
template <class T>
requires std::is_unbounded_array_v<T> std::unique_ptr<T> make_unique_for_overwrite(std::size_t n) {
requires std::is_unbounded_array_v<T>
std::unique_ptr<T> make_unique_for_overwrite(std::size_t n) {
return std::unique_ptr<T>(new std::remove_extent_t<T>[n]);
}
template <class T, class... Args>
requires std::is_bounded_array_v<T>
requires std::is_bounded_array_v<T>
void make_unique_for_overwrite(Args&&...) = delete;
} // namespace Common
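A usage sketch for the reformatted helpers above (the header path is an assumption):

#include <cstddef>
#include <memory>
#include "common/make_unique_for_overwrite.h" // assumed path

void Demo(std::size_t n) {
    // Unbounded-array form: n default-initialized (i.e. uninitialized) bytes.
    auto buffer = Common::make_unique_for_overwrite<unsigned char[]>(n);
    // Single-object form:
    auto one = Common::make_unique_for_overwrite<int>();
}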


@@ -18,9 +18,9 @@ namespace ranges {
template <typename T>
concept range = requires(T& t) {
begin(t);
end(t);
};
begin(t);
end(t);
};
template <typename T>
concept input_range = range<T>;
@@ -421,7 +421,7 @@ struct generate_fn {
}
template <typename R, std::copy_constructible F>
requires std::invocable<F&> && ranges::output_range<R>
requires std::invocable<F&> && ranges::output_range<R>
constexpr ranges::iterator_t<R> operator()(R&& r, F gen) const {
return operator()(ranges::begin(r), ranges::end(r), std::move(gen));
}


@@ -11,6 +11,8 @@
#ifdef __cpp_lib_jthread
#include <chrono>
#include <condition_variable>
#include <stop_token>
#include <thread>
@@ -21,23 +23,36 @@ void CondvarWait(Condvar& cv, Lock& lock, std::stop_token token, Pred&& pred) {
cv.wait(lock, token, std::move(pred));
}
template <typename Rep, typename Period>
bool StoppableTimedWait(std::stop_token token, const std::chrono::duration<Rep, Period>& rel_time) {
std::condition_variable_any cv;
std::mutex m;
// Perform the timed wait.
std::unique_lock lk{m};
return !cv.wait_for(lk, token, rel_time, [&] { return token.stop_requested(); });
}
} // namespace Common
#else
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <optional>
#include <thread>
#include <type_traits>
#include <utility>
namespace std {
namespace polyfill {
using stop_state_callbacks = list<function<void()>>;
using stop_state_callback = size_t;
class stop_state {
public:
@@ -45,61 +60,69 @@ public:
~stop_state() = default;
bool request_stop() {
stop_state_callbacks callbacks;
unique_lock lk{m_lock};
{
scoped_lock lk{m_lock};
if (m_stop_requested.load()) {
// Already set, nothing to do
return false;
}
// Set as requested
m_stop_requested = true;
// Copy callback list
callbacks = m_callbacks;
if (m_stop_requested) {
// Already set, nothing to do.
return false;
}
for (auto callback : callbacks) {
callback();
// Mark stop requested.
m_stop_requested = true;
while (!m_callbacks.empty()) {
// Get an iterator to the first element.
const auto it = m_callbacks.begin();
// Move the callback function out of the map.
function<void()> f;
swap(it->second, f);
// Erase the now-empty map element.
m_callbacks.erase(it);
// Run the callback.
if (f) {
f();
}
}
return true;
}
bool stop_requested() const {
return m_stop_requested.load();
unique_lock lk{m_lock};
return m_stop_requested;
}
stop_state_callbacks::const_iterator insert_callback(function<void()> f) {
stop_state_callbacks::const_iterator ret{};
bool should_run{};
stop_state_callback insert_callback(function<void()> f) {
unique_lock lk{m_lock};
{
scoped_lock lk{m_lock};
should_run = m_stop_requested.load();
m_callbacks.push_front(f);
ret = m_callbacks.begin();
}
if (should_run) {
f();
if (m_stop_requested) {
// Stop already requested. Don't insert anything,
// just run the callback synchronously.
if (f) {
f();
}
return 0;
}
// Insert the callback.
stop_state_callback ret = ++m_next_callback;
m_callbacks.emplace(ret, move(f));
return ret;
}
void remove_callback(stop_state_callbacks::const_iterator it) {
scoped_lock lk{m_lock};
m_callbacks.erase(it);
void remove_callback(stop_state_callback cb) {
unique_lock lk{m_lock};
m_callbacks.erase(cb);
}
private:
mutex m_lock;
atomic<bool> m_stop_requested;
stop_state_callbacks m_callbacks;
mutable recursive_mutex m_lock;
map<stop_state_callback, function<void()>> m_callbacks;
stop_state_callback m_next_callback{0};
bool m_stop_requested{false};
};
} // namespace polyfill
@@ -190,7 +213,7 @@ public:
using callback_type = Callback;
template <typename C>
requires constructible_from<Callback, C>
requires constructible_from<Callback, C>
explicit stop_callback(const stop_token& st,
C&& cb) noexcept(is_nothrow_constructible_v<Callback, C>)
: m_stop_state(st.m_stop_state) {
@@ -199,7 +222,7 @@ public:
}
}
template <typename C>
requires constructible_from<Callback, C>
requires constructible_from<Callback, C>
explicit stop_callback(stop_token&& st,
C&& cb) noexcept(is_nothrow_constructible_v<Callback, C>)
: m_stop_state(move(st.m_stop_state)) {
@@ -209,7 +232,7 @@ public:
}
~stop_callback() {
if (m_stop_state && m_callback) {
m_stop_state->remove_callback(*m_callback);
m_stop_state->remove_callback(m_callback);
}
}
@@ -220,7 +243,7 @@ public:
private:
shared_ptr<polyfill::stop_state> m_stop_state;
optional<polyfill::stop_state_callbacks::const_iterator> m_callback;
polyfill::stop_state_callback m_callback;
};
template <typename Callback>
@ -318,6 +341,28 @@ void CondvarWait(Condvar& cv, Lock& lock, std::stop_token token, Pred pred) {
cv.wait(lock, [&] { return pred() || token.stop_requested(); });
}
template <typename Rep, typename Period>
bool StoppableTimedWait(std::stop_token token, const std::chrono::duration<Rep, Period>& rel_time) {
if (token.stop_requested()) {
return false;
}
bool stop_requested = false;
std::condition_variable cv;
std::mutex m;
std::stop_callback cb(token, [&] {
// Wake up the waiting thread.
std::unique_lock lk{m};
stop_requested = true;
cv.notify_one();
});
// Perform the timed wait.
std::unique_lock lk{m};
return !cv.wait_for(lk, rel_time, [&] { return stop_requested; });
}
} // namespace Common
#endif
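
Both branches yield the same contract for Common::StoppableTimedWait: it blocks for rel_time and returns true after a full sleep, or returns false early once a stop is requested. A minimal usage sketch, assuming the polyfill header name from yuzu's tree and an illustrative worker function:

#include <chrono>
#include "common/polyfill_thread.h"

void RunWorker(std::stop_token token) {
    using namespace std::chrono_literals;
    // Loop every 100ms until request_stop() is called on the owning jthread.
    while (Common::StoppableTimedWait(token, 100ms)) {
        // Periodic work goes here.
    }
}
// std::jthread worker{RunWorker}; // jthread supplies the stop_token itself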

View File

@ -131,7 +131,8 @@ public:
* @param default_val Initial value of the setting, and default value of the setting
* @param name Label for the setting
*/
explicit Setting(const Type& default_val, const std::string& name) requires(!ranged)
explicit Setting(const Type& default_val, const std::string& name)
requires(!ranged)
: value{default_val}, default_value{default_val}, label{name} {}
virtual ~Setting() = default;
@ -144,7 +145,8 @@ public:
* @param name Label for the setting
*/
explicit Setting(const Type& default_val, const Type& min_val, const Type& max_val,
const std::string& name) requires(ranged)
const std::string& name)
requires(ranged)
: value{default_val},
default_value{default_val}, maximum{max_val}, minimum{min_val}, label{name} {}
@ -232,7 +234,8 @@ public:
* @param default_val Initial value of the setting, and default value of the setting
* @param name Label for the setting
*/
explicit SwitchableSetting(const Type& default_val, const std::string& name) requires(!ranged)
explicit SwitchableSetting(const Type& default_val, const std::string& name)
requires(!ranged)
: Setting<Type>{default_val, name} {}
virtual ~SwitchableSetting() = default;
@ -245,7 +248,8 @@ public:
* @param name Label for the setting
*/
explicit SwitchableSetting(const Type& default_val, const Type& min_val, const Type& max_val,
const std::string& name) requires(ranged)
const std::string& name)
requires(ranged)
: Setting<Type, true>{default_val, min_val, max_val, name} {}
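
For illustration, a hedged sketch of how the relocated requires-clauses select between the two constructors; the setting names are hypothetical:

// Unranged: picks the requires(!ranged) overload.
Settings::Setting<bool> use_docked{true, "use_docked_mode"};
// Ranged: picks the requires(ranged) overload and carries min/max bounds.
Settings::Setting<int, true> volume{100, 0, 200, "volume"};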
/**

View File

@ -30,7 +30,7 @@ std::string ToUpper(std::string str) {
return str;
}
std::string StringFromBuffer(const std::vector<u8>& data) {
std::string StringFromBuffer(std::span<const u8> data) {
return std::string(data.begin(), std::find(data.begin(), data.end(), '\0'));
}
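
Since the parameter is now std::span<const u8>, any contiguous byte range converts implicitly and the function still stops at the first NUL; a small hedged sketch:

const std::vector<u8> vec{'h', 'i', '\0', 'x'};
const std::array<u8, 3> arr{'y', 'o', '\0'};
const std::string a = Common::StringFromBuffer(vec); // "hi"
const std::string b = Common::StringFromBuffer(arr); // "yo", no owning vector needed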

View File

@ -5,6 +5,7 @@
#pragma once
#include <cstddef>
#include <span>
#include <string>
#include <vector>
#include "common/common_types.h"
@ -17,7 +18,7 @@ namespace Common {
/// Make a string uppercase
[[nodiscard]] std::string ToUpper(std::string str);
[[nodiscard]] std::string StringFromBuffer(const std::vector<u8>& data);
[[nodiscard]] std::string StringFromBuffer(std::span<const u8> data);
[[nodiscard]] std::string StripSpaces(const std::string& s);
[[nodiscard]] std::string StripQuotes(const std::string& s);

View File

@ -103,12 +103,12 @@ concept IsRBEntry = CheckRBEntry<T>::value;
template <typename T>
concept HasRBEntry = requires(T& t, const T& ct) {
{ t.GetRBEntry() } -> std::same_as<RBEntry<T>&>;
{ ct.GetRBEntry() } -> std::same_as<const RBEntry<T>&>;
};
{ t.GetRBEntry() } -> std::same_as<RBEntry<T>&>;
{ ct.GetRBEntry() } -> std::same_as<const RBEntry<T>&>;
};
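
A type models HasRBEntry by exposing exactly these two accessors; a minimal hedged sketch, with a hypothetical node type:

struct IntrusiveNode {
    RBEntry<IntrusiveNode> m_entry;

    RBEntry<IntrusiveNode>& GetRBEntry() { return m_entry; }
    const RBEntry<IntrusiveNode>& GetRBEntry() const { return m_entry; }
};
static_assert(HasRBEntry<IntrusiveNode>);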
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
class RBHead {
private:
T* m_rbh_root = nullptr;
@ -130,90 +130,90 @@ public:
};
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
[[nodiscard]] constexpr RBEntry<T>& RB_ENTRY(T* t) {
return t->GetRBEntry();
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
[[nodiscard]] constexpr const RBEntry<T>& RB_ENTRY(const T* t) {
return t->GetRBEntry();
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
[[nodiscard]] constexpr T* RB_LEFT(T* t) {
return RB_ENTRY(t).Left();
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
[[nodiscard]] constexpr const T* RB_LEFT(const T* t) {
return RB_ENTRY(t).Left();
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
[[nodiscard]] constexpr T* RB_RIGHT(T* t) {
return RB_ENTRY(t).Right();
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
[[nodiscard]] constexpr const T* RB_RIGHT(const T* t) {
return RB_ENTRY(t).Right();
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
[[nodiscard]] constexpr T* RB_PARENT(T* t) {
return RB_ENTRY(t).Parent();
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
[[nodiscard]] constexpr const T* RB_PARENT(const T* t) {
return RB_ENTRY(t).Parent();
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr void RB_SET_LEFT(T* t, T* e) {
RB_ENTRY(t).SetLeft(e);
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr void RB_SET_RIGHT(T* t, T* e) {
RB_ENTRY(t).SetRight(e);
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr void RB_SET_PARENT(T* t, T* e) {
RB_ENTRY(t).SetParent(e);
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
[[nodiscard]] constexpr bool RB_IS_BLACK(const T* t) {
return RB_ENTRY(t).IsBlack();
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
[[nodiscard]] constexpr bool RB_IS_RED(const T* t) {
return RB_ENTRY(t).IsRed();
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
[[nodiscard]] constexpr RBColor RB_COLOR(const T* t) {
return RB_ENTRY(t).Color();
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr void RB_SET_COLOR(T* t, RBColor c) {
RB_ENTRY(t).SetColor(c);
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr void RB_SET(T* elm, T* parent) {
auto& rb_entry = RB_ENTRY(elm);
rb_entry.SetParent(parent);
@ -223,14 +223,14 @@ constexpr void RB_SET(T* elm, T* parent) {
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr void RB_SET_BLACKRED(T* black, T* red) {
RB_SET_COLOR(black, RBColor::RB_BLACK);
RB_SET_COLOR(red, RBColor::RB_RED);
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr void RB_ROTATE_LEFT(RBHead<T>& head, T* elm, T*& tmp) {
tmp = RB_RIGHT(elm);
if (RB_SET_RIGHT(elm, RB_LEFT(tmp)); RB_RIGHT(elm) != nullptr) {
@ -252,7 +252,7 @@ constexpr void RB_ROTATE_LEFT(RBHead<T>& head, T* elm, T*& tmp) {
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr void RB_ROTATE_RIGHT(RBHead<T>& head, T* elm, T*& tmp) {
tmp = RB_LEFT(elm);
if (RB_SET_LEFT(elm, RB_RIGHT(tmp)); RB_LEFT(elm) != nullptr) {
@ -274,7 +274,7 @@ constexpr void RB_ROTATE_RIGHT(RBHead<T>& head, T* elm, T*& tmp) {
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr void RB_REMOVE_COLOR(RBHead<T>& head, T* parent, T* elm) {
T* tmp;
while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head.Root()) {
@ -358,7 +358,7 @@ constexpr void RB_REMOVE_COLOR(RBHead<T>& head, T* parent, T* elm) {
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr T* RB_REMOVE(RBHead<T>& head, T* elm) {
T* child = nullptr;
T* parent = nullptr;
@ -451,7 +451,7 @@ constexpr T* RB_REMOVE(RBHead<T>& head, T* elm) {
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr void RB_INSERT_COLOR(RBHead<T>& head, T* elm) {
T *parent = nullptr, *tmp = nullptr;
while ((parent = RB_PARENT(elm)) != nullptr && RB_IS_RED(parent)) {
@ -499,7 +499,7 @@ constexpr void RB_INSERT_COLOR(RBHead<T>& head, T* elm) {
}
template <typename T, typename Compare>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr T* RB_INSERT(RBHead<T>& head, T* elm, Compare cmp) {
T* parent = nullptr;
T* tmp = head.Root();
@ -534,7 +534,7 @@ constexpr T* RB_INSERT(RBHead<T>& head, T* elm, Compare cmp) {
}
template <typename T, typename Compare>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr T* RB_FIND(RBHead<T>& head, T* elm, Compare cmp) {
T* tmp = head.Root();
@ -553,7 +553,7 @@ constexpr T* RB_FIND(RBHead<T>& head, T* elm, Compare cmp) {
}
template <typename T, typename Compare>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr T* RB_NFIND(RBHead<T>& head, T* elm, Compare cmp) {
T* tmp = head.Root();
T* res = nullptr;
@ -574,7 +574,7 @@ constexpr T* RB_NFIND(RBHead<T>& head, T* elm, Compare cmp) {
}
template <typename T, typename U, typename Compare>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr T* RB_FIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
T* tmp = head.Root();
@ -593,7 +593,7 @@ constexpr T* RB_FIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
}
template <typename T, typename U, typename Compare>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr T* RB_NFIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
T* tmp = head.Root();
T* res = nullptr;
@ -614,7 +614,7 @@ constexpr T* RB_NFIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
}
template <typename T, typename Compare>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr T* RB_FIND_EXISTING(RBHead<T>& head, T* elm, Compare cmp) {
T* tmp = head.Root();
@ -631,7 +631,7 @@ constexpr T* RB_FIND_EXISTING(RBHead<T>& head, T* elm, Compare cmp) {
}
template <typename T, typename U, typename Compare>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr T* RB_FIND_EXISTING_KEY(RBHead<T>& head, const U& key, Compare cmp) {
T* tmp = head.Root();
@ -648,7 +648,7 @@ constexpr T* RB_FIND_EXISTING_KEY(RBHead<T>& head, const U& key, Compare cmp) {
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr T* RB_NEXT(T* elm) {
if (RB_RIGHT(elm)) {
elm = RB_RIGHT(elm);
@ -669,7 +669,7 @@ constexpr T* RB_NEXT(T* elm) {
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr T* RB_PREV(T* elm) {
if (RB_LEFT(elm)) {
elm = RB_LEFT(elm);
@ -690,7 +690,7 @@ constexpr T* RB_PREV(T* elm) {
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr T* RB_MIN(RBHead<T>& head) {
T* tmp = head.Root();
T* parent = nullptr;
@ -704,7 +704,7 @@ constexpr T* RB_MIN(RBHead<T>& head) {
}
template <typename T>
requires HasRBEntry<T>
requires HasRBEntry<T>
constexpr T* RB_MAX(RBHead<T>& head) {
T* tmp = head.Root();
T* parent = nullptr;

View File

@ -348,9 +348,7 @@ public:
// _DEFINE_SWIZZLER2 defines a single such function, DEFINE_SWIZZLER2 defines all of them for all
// component names (x<->r) and permutations (xy<->yx)
#define _DEFINE_SWIZZLER2(a, b, name) \
[[nodiscard]] constexpr Vec2<T> name() const { \
return Vec2<T>(a, b); \
}
[[nodiscard]] constexpr Vec2<T> name() const { return Vec2<T>(a, b); }
#define DEFINE_SWIZZLER2(a, b, a2, b2, a3, b3, a4, b4) \
_DEFINE_SWIZZLER2(a, b, a##b); \
_DEFINE_SWIZZLER2(a, b, a2##b2); \
@ -543,9 +541,7 @@ public:
// DEFINE_SWIZZLER2_COMP2 defines two component functions for all component names (x<->r) and
// permutations (xy<->yx)
#define _DEFINE_SWIZZLER2(a, b, name) \
[[nodiscard]] constexpr Vec2<T> name() const { \
return Vec2<T>(a, b); \
}
[[nodiscard]] constexpr Vec2<T> name() const { return Vec2<T>(a, b); }
#define DEFINE_SWIZZLER2_COMP1(a, a2) \
_DEFINE_SWIZZLER2(a, a, a##a); \
_DEFINE_SWIZZLER2(a, a, a2##a2)
@ -570,9 +566,7 @@ public:
#undef _DEFINE_SWIZZLER2
#define _DEFINE_SWIZZLER3(a, b, c, name) \
[[nodiscard]] constexpr Vec3<T> name() const { \
return Vec3<T>(a, b, c); \
}
[[nodiscard]] constexpr Vec3<T> name() const { return Vec3<T>(a, b, c); }
#define DEFINE_SWIZZLER3_COMP1(a, a2) \
_DEFINE_SWIZZLER3(a, a, a, a##a##a); \
_DEFINE_SWIZZLER3(a, a, a, a2##a2##a2)
@ -641,8 +635,8 @@ template <typename T>
// linear interpolation via float: 0.0=begin, 1.0=end
template <typename X>
[[nodiscard]] constexpr decltype(X{} * float{} + X{} * float{})
Lerp(const X& begin, const X& end, const float t) {
[[nodiscard]] constexpr decltype(X{} * float{} + X{} * float{}) Lerp(const X& begin, const X& end,
const float t) {
return begin * (1.f - t) + end * t;
}
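
A quick hedged usage sketch of Lerp with the vector types defined above; the values are illustrative:

const Common::Vec3<float> begin{0.0f, 0.0f, 0.0f};
const Common::Vec3<float> end{2.0f, 4.0f, 8.0f};
const auto midpoint = Common::Lerp(begin, end, 0.5f); // (1.0f, 2.0f, 4.0f)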

View File

@ -182,6 +182,8 @@ add_library(core STATIC
hle/kernel/k_auto_object_container.cpp
hle/kernel/k_auto_object_container.h
hle/kernel/k_affinity_mask.h
hle/kernel/k_capabilities.cpp
hle/kernel/k_capabilities.h
hle/kernel/k_class_token.cpp
hle/kernel/k_class_token.h
hle/kernel/k_client_port.cpp
@ -193,6 +195,8 @@ add_library(core STATIC
hle/kernel/k_condition_variable.cpp
hle/kernel/k_condition_variable.h
hle/kernel/k_debug.h
hle/kernel/k_device_address_space.cpp
hle/kernel/k_device_address_space.h
hle/kernel/k_dynamic_page_manager.h
hle/kernel/k_dynamic_resource_manager.h
hle/kernel/k_dynamic_slab_heap.h
@ -294,7 +298,42 @@ add_library(core STATIC
hle/kernel/svc.h
hle/kernel/svc_common.h
hle/kernel/svc_types.h
hle/kernel/svc_wrap.h
hle/kernel/svc/svc_activity.cpp
hle/kernel/svc/svc_address_arbiter.cpp
hle/kernel/svc/svc_address_translation.cpp
hle/kernel/svc/svc_cache.cpp
hle/kernel/svc/svc_code_memory.cpp
hle/kernel/svc/svc_condition_variable.cpp
hle/kernel/svc/svc_debug.cpp
hle/kernel/svc/svc_debug_string.cpp
hle/kernel/svc/svc_device_address_space.cpp
hle/kernel/svc/svc_event.cpp
hle/kernel/svc/svc_exception.cpp
hle/kernel/svc/svc_info.cpp
hle/kernel/svc/svc_interrupt_event.cpp
hle/kernel/svc/svc_io_pool.cpp
hle/kernel/svc/svc_ipc.cpp
hle/kernel/svc/svc_kernel_debug.cpp
hle/kernel/svc/svc_light_ipc.cpp
hle/kernel/svc/svc_lock.cpp
hle/kernel/svc/svc_memory.cpp
hle/kernel/svc/svc_physical_memory.cpp
hle/kernel/svc/svc_port.cpp
hle/kernel/svc/svc_power_management.cpp
hle/kernel/svc/svc_process.cpp
hle/kernel/svc/svc_process_memory.cpp
hle/kernel/svc/svc_processor.cpp
hle/kernel/svc/svc_query_memory.cpp
hle/kernel/svc/svc_register.cpp
hle/kernel/svc/svc_resource_limit.cpp
hle/kernel/svc/svc_secure_monitor_call.cpp
hle/kernel/svc/svc_session.cpp
hle/kernel/svc/svc_shared_memory.cpp
hle/kernel/svc/svc_synchronization.cpp
hle/kernel/svc/svc_thread.cpp
hle/kernel/svc/svc_thread_profiler.cpp
hle/kernel/svc/svc_tick.cpp
hle/kernel/svc/svc_transfer_memory.cpp
hle/result.h
hle/service/acc/acc.cpp
hle/service/acc/acc.h

View File

@ -25,6 +25,26 @@ constexpr std::array<s32, Common::BitSize<u64>()> VirtualToPhysicalCoreMap{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
};
static constexpr inline size_t NumVirtualCores = Common::BitSize<u64>();
static constexpr inline u64 VirtualCoreMask = [] {
u64 mask = 0;
for (size_t i = 0; i < NumVirtualCores; ++i) {
mask |= (UINT64_C(1) << i);
}
return mask;
}();
static constexpr inline u64 ConvertVirtualCoreMaskToPhysical(u64 v_core_mask) {
u64 p_core_mask = 0;
while (v_core_mask != 0) {
const u64 next = std::countr_zero(v_core_mask);
v_core_mask &= ~(static_cast<u64>(1) << next);
p_core_mask |= (static_cast<u64>(1) << VirtualToPhysicalCoreMap[next]);
}
return p_core_mask;
}
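// Worked example (sketch): the row of VirtualToPhysicalCoreMap shown above covers
// virtual cores 32-63, so of those only virtual core 63 remaps, to physical core 3.
static_assert(ConvertVirtualCoreMaskToPhysical(1ULL << 63) == (1ULL << 3));
static_assert(ConvertVirtualCoreMaskToPhysical(1ULL << 32) == (1ULL << 0));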
// Cortex-A57 supports 4 memory watchpoints
constexpr u64 NUM_WATCHPOINTS = 4;

View File

@ -11,8 +11,8 @@
namespace Core::HID {
constexpr s32 HID_JOYSTICK_MAX = 0x7fff;
constexpr s32 HID_JOYSTICK_MIN = 0x7ffe;
constexpr s32 HID_TRIGGER_MAX = 0x7fff;
constexpr u32 TURBO_BUTTON_DELAY = 4;
// Use a common UUID for TAS and Virtual Gamepad
constexpr Common::UUID TAS_UUID =
Common::UUID{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7, 0xA5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}};
@ -448,6 +448,7 @@ void EmulatedController::ReloadInput() {
},
});
}
turbo_button_state = 0;
}
void EmulatedController::UnloadInput() {
@ -688,6 +689,7 @@ void EmulatedController::SetButton(const Common::Input::CallbackStatus& callback
}
current_status.toggle = new_status.toggle;
current_status.turbo = new_status.turbo;
current_status.uuid = uuid;
// Update button status with current
@ -864,16 +866,9 @@ void EmulatedController::SetStick(const Common::Input::CallbackStatus& callback,
return;
}
const auto FloatToShort = [](float a) {
if (a > 0) {
return static_cast<s32>(a * HID_JOYSTICK_MAX);
}
return static_cast<s32>(a * HID_JOYSTICK_MIN);
};
const AnalogStickState stick{
.x = FloatToShort(controller.stick_values[index].x.value),
.y = FloatToShort(controller.stick_values[index].y.value),
.x = static_cast<s32>(controller.stick_values[index].x.value * HID_JOYSTICK_MAX),
.y = static_cast<s32>(controller.stick_values[index].y.value * HID_JOYSTICK_MAX),
};
switch (index) {
@ -1556,7 +1551,7 @@ NpadButtonState EmulatedController::GetNpadButtons() const {
if (is_configuring) {
return {};
}
return controller.npad_button_state;
return {controller.npad_button_state.raw & GetTurboButtonMask()};
}
DebugPadButton EmulatedController::GetDebugPadButtons() const {
@ -1664,4 +1659,74 @@ void EmulatedController::DeleteCallback(int key) {
}
callback_list.erase(iterator);
}
void EmulatedController::TurboButtonUpdate() {
turbo_button_state = (turbo_button_state + 1) % (TURBO_BUTTON_DELAY * 2);
}
NpadButton EmulatedController::GetTurboButtonMask() const {
// Apply no mask when disabled
if (turbo_button_state < TURBO_BUTTON_DELAY) {
return {NpadButton::All};
}
NpadButtonState button_mask{};
for (std::size_t index = 0; index < controller.button_values.size(); ++index) {
if (!controller.button_values[index].turbo) {
continue;
}
switch (index) {
case Settings::NativeButton::A:
button_mask.a.Assign(1);
break;
case Settings::NativeButton::B:
button_mask.b.Assign(1);
break;
case Settings::NativeButton::X:
button_mask.x.Assign(1);
break;
case Settings::NativeButton::Y:
button_mask.y.Assign(1);
break;
case Settings::NativeButton::L:
button_mask.l.Assign(1);
break;
case Settings::NativeButton::R:
button_mask.r.Assign(1);
break;
case Settings::NativeButton::ZL:
button_mask.zl.Assign(1);
break;
case Settings::NativeButton::ZR:
button_mask.zr.Assign(1);
break;
case Settings::NativeButton::DLeft:
button_mask.left.Assign(1);
break;
case Settings::NativeButton::DUp:
button_mask.up.Assign(1);
break;
case Settings::NativeButton::DRight:
button_mask.right.Assign(1);
break;
case Settings::NativeButton::DDown:
button_mask.down.Assign(1);
break;
case Settings::NativeButton::SL:
button_mask.left_sl.Assign(1);
button_mask.right_sl.Assign(1);
break;
case Settings::NativeButton::SR:
button_mask.left_sr.Assign(1);
button_mask.right_sr.Assign(1);
break;
default:
break;
}
}
return static_cast<NpadButton>(~button_mask.raw);
}
} // namespace Core::HID
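
The turbo counter advances once per TurboButtonUpdate call and cycles through 2 * TURBO_BUTTON_DELAY states: turbo-flagged buttons read as pressed for the first TURBO_BUTTON_DELAY states and are masked off for the remainder, while other buttons always pass through. A hedged model of that duty cycle, assuming TURBO_BUTTON_DELAY == 4 as above:

// True while turbo-flagged buttons pass through, false while they are masked.
constexpr bool TurboActive(u32 turbo_state) {
    return (turbo_state % (TURBO_BUTTON_DELAY * 2)) < TURBO_BUTTON_DELAY;
}
static_assert(TurboActive(0) && TurboActive(3) && !TurboActive(4) && !TurboActive(7));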

View File

@ -411,6 +411,9 @@ public:
*/
void DeleteCallback(int key);
/// Advances the turbo button state, toggling turbo-flagged buttons on and off
void TurboButtonUpdate();
private:
/// creates input devices from params
void LoadDevices();
@ -511,6 +514,8 @@ private:
*/
void TriggerOnChange(ControllerTriggerType type, bool is_service_update);
NpadButton GetTurboButtonMask() const;
const NpadIdType npad_id_type;
NpadStyleIndex npad_type{NpadStyleIndex::None};
NpadStyleIndex original_npad_type{NpadStyleIndex::None};
@ -520,6 +525,7 @@ private:
bool system_buttons_enabled{true};
f32 motion_sensitivity{0.01f};
bool force_update_motion{false};
u32 turbo_button_state{0};
// Temporary values to avoid doing changes while the controller is in configuring mode
NpadStyleIndex tmp_npad_type{NpadStyleIndex::None};

View File

@ -11,6 +11,7 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/scratch_buffer.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_auto_object.h"
@ -325,7 +326,7 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa
return ResultSuccess;
}
std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) const {
const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
BufferDescriptorA()[buffer_index].Size()};
if (is_buffer_a) {
@ -345,6 +346,33 @@ std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
}
}
std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
static thread_local std::array<Common::ScratchBuffer<u8>, 2> read_buffer_a;
static thread_local std::array<Common::ScratchBuffer<u8>, 2> read_buffer_x;
const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
BufferDescriptorA()[buffer_index].Size()};
if (is_buffer_a) {
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorA().size() > buffer_index, { return {}; },
"BufferDescriptorA invalid buffer_index {}", buffer_index);
auto& read_buffer = read_buffer_a[buffer_index];
read_buffer.resize_destructive(BufferDescriptorA()[buffer_index].Size());
memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), read_buffer.data(),
read_buffer.size());
return read_buffer;
} else {
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorX().size() > buffer_index, { return {}; },
"BufferDescriptorX invalid buffer_index {}", buffer_index);
auto& read_buffer = read_buffer_x[buffer_index];
read_buffer.resize_destructive(BufferDescriptorX()[buffer_index].Size());
memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), read_buffer.data(),
read_buffer.size());
return read_buffer;
}
}
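
Because ReadBuffer now returns a view into a thread_local scratch buffer, the span is only valid until the next ReadBuffer call on the same thread for the same descriptor type; callers that need the bytes to outlive that window should prefer ReadBufferCopy. A hedged usage sketch with a hypothetical handler:

void Handle(Kernel::HLERequestContext& ctx) {
    const std::span<const u8> view = ctx.ReadBuffer();  // zero-copy, transient
    const std::vector<u8> owned = ctx.ReadBufferCopy(); // independent lifetime
    // Parse `view` immediately, or keep `owned` for later use.
}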
std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
std::size_t buffer_index) const {
if (size == 0) {

View File

@ -7,6 +7,7 @@
#include <functional>
#include <memory>
#include <optional>
#include <span>
#include <string>
#include <type_traits>
#include <vector>
@ -270,8 +271,11 @@ public:
return domain_message_header.has_value();
}
/// Helper function to read a buffer using the appropriate buffer descriptor
[[nodiscard]] std::vector<u8> ReadBuffer(std::size_t buffer_index = 0) const;
/// Helper function to get a span of a buffer using the appropriate buffer descriptor
[[nodiscard]] std::span<const u8> ReadBuffer(std::size_t buffer_index = 0) const;
/// Helper function to read a copy of a buffer using the appropriate buffer descriptor
[[nodiscard]] std::vector<u8> ReadBufferCopy(std::size_t buffer_index = 0) const;
/// Helper function to write a buffer using the appropriate buffer descriptor
std::size_t WriteBuffer(const void* buffer, std::size_t size,

View File

@ -11,6 +11,7 @@
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_debug.h"
#include "core/hle/kernel/k_device_address_space.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_event_info.h"
#include "core/hle/kernel/k_memory_layout.h"
@ -43,6 +44,7 @@ namespace Kernel::Init {
HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \
HANDLER(KDeviceAddressSpace, (SLAB_COUNT(KDeviceAddressSpace)), ##__VA_ARGS__) \
HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \
HANDLER(KThreadLocalPage, \
(SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \

View File

@ -24,9 +24,7 @@ private:
friend class ::Kernel::KClassTokenGenerator; \
static constexpr inline auto ObjectType = ::Kernel::KClassTokenGenerator::ObjectType::CLASS; \
static constexpr inline const char* const TypeName = #CLASS; \
static constexpr inline ClassTokenType ClassToken() { \
return ::Kernel::ClassToken<CLASS>; \
} \
static constexpr inline ClassTokenType ClassToken() { return ::Kernel::ClassToken<CLASS>; } \
\
public: \
YUZU_NON_COPYABLE(CLASS); \
@ -37,15 +35,9 @@ public:
constexpr ClassTokenType Token = ClassToken(); \
return TypeObj(TypeName, Token); \
} \
static constexpr const char* GetStaticTypeName() { \
return TypeName; \
} \
virtual TypeObj GetTypeObj() ATTRIBUTE { \
return GetStaticTypeObj(); \
} \
virtual const char* GetTypeName() ATTRIBUTE { \
return GetStaticTypeName(); \
} \
static constexpr const char* GetStaticTypeName() { return TypeName; } \
virtual TypeObj GetTypeObj() ATTRIBUTE { return GetStaticTypeObj(); } \
virtual const char* GetTypeName() ATTRIBUTE { return GetStaticTypeName(); } \
\
private: \
constexpr bool operator!=(const TypeObj& rhs)
@ -245,8 +237,8 @@ public:
}
template <typename U>
requires(std::derived_from<T, U> ||
std::derived_from<U, T>) constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) {
requires(std::derived_from<T, U> || std::derived_from<U, T>)
constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) {
if constexpr (std::derived_from<U, T>) {
// Upcast.
m_obj = rhs.m_obj;

View File

@ -0,0 +1,358 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_capabilities.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_version.h"
namespace Kernel {
Result KCapabilities::InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table) {
// We're initializing an initial process.
m_svc_access_flags.reset();
m_irq_access_flags.reset();
m_debug_capabilities = 0;
m_handle_table_size = 0;
m_intended_kernel_version = 0;
m_program_type = 0;
// Initial processes may run on all cores.
constexpr u64 VirtMask = Core::Hardware::VirtualCoreMask;
constexpr u64 PhysMask = Core::Hardware::ConvertVirtualCoreMaskToPhysical(VirtMask);
m_core_mask = VirtMask;
m_phys_core_mask = PhysMask;
// Initial processes may use any user priority they like.
m_priority_mask = ~0xFULL;
// Here, Nintendo sets the kernel version to the current kernel version.
// We will follow suit and set the version to the highest supported kernel version.
KernelVersion intended_kernel_version{};
intended_kernel_version.major_version.Assign(Svc::SupportedKernelMajorVersion);
intended_kernel_version.minor_version.Assign(Svc::SupportedKernelMinorVersion);
m_intended_kernel_version = intended_kernel_version.raw;
// Parse the capabilities array.
R_RETURN(this->SetCapabilities(kern_caps, page_table));
}
Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table) {
// We're initializing a user process.
m_svc_access_flags.reset();
m_irq_access_flags.reset();
m_debug_capabilities = 0;
m_handle_table_size = 0;
m_intended_kernel_version = 0;
m_program_type = 0;
// User processes must specify what cores/priorities they can use.
m_core_mask = 0;
m_priority_mask = 0;
// Parse the user capabilities array.
R_RETURN(this->SetCapabilities(user_caps, page_table));
}
Result KCapabilities::SetCorePriorityCapability(const u32 cap) {
// We can't set core/priority if we've already set them.
R_UNLESS(m_core_mask == 0, ResultInvalidArgument);
R_UNLESS(m_priority_mask == 0, ResultInvalidArgument);
// Validate the core/priority.
CorePriority pack{cap};
const u32 min_core = pack.minimum_core_id;
const u32 max_core = pack.maximum_core_id;
const u32 max_prio = pack.lowest_thread_priority;
const u32 min_prio = pack.highest_thread_priority;
R_UNLESS(min_core <= max_core, ResultInvalidCombination);
R_UNLESS(min_prio <= max_prio, ResultInvalidCombination);
R_UNLESS(max_core < Core::Hardware::NumVirtualCores, ResultInvalidCoreId);
ASSERT(max_prio < Common::BitSize<u64>());
// Set core mask.
for (auto core_id = min_core; core_id <= max_core; core_id++) {
m_core_mask |= (1ULL << core_id);
}
ASSERT((m_core_mask & Core::Hardware::VirtualCoreMask) == m_core_mask);
// Set physical core mask.
m_phys_core_mask = Core::Hardware::ConvertVirtualCoreMaskToPhysical(m_core_mask);
// Set priority mask.
for (auto prio = min_prio; prio <= max_prio; prio++) {
m_priority_mask |= (1ULL << prio);
}
// We must have some core/priority we can use.
R_UNLESS(m_core_mask != 0, ResultInvalidArgument);
R_UNLESS(m_priority_mask != 0, ResultInvalidArgument);
// Processes must not have access to kernel thread priorities.
R_UNLESS((m_priority_mask & 0xF) == 0, ResultInvalidArgument);
R_SUCCEED();
}
Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) {
// Validate the index.
SyscallMask pack{cap};
const u32 mask = pack.mask;
const u32 index = pack.index;
const u32 index_flag = (1U << index);
R_UNLESS((set_svc & index_flag) == 0, ResultInvalidCombination);
set_svc |= index_flag;
// Set SVCs.
for (size_t i = 0; i < decltype(SyscallMask::mask)::bits; i++) {
const u32 svc_id = static_cast<u32>(decltype(SyscallMask::mask)::bits * index + i);
if (mask & (1U << i)) {
R_UNLESS(this->SetSvcAllowed(svc_id), ResultOutOfRange);
}
}
R_SUCCEED();
}
Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) {
const auto range_pack = MapRange{cap};
const auto size_pack = MapRangeSize{size_cap};
// Get/validate address/size
const u64 phys_addr = range_pack.address.Value() * PageSize;
// Validate reserved bits are unused.
R_UNLESS(size_pack.reserved.Value() == 0, ResultOutOfRange);
const size_t num_pages = size_pack.pages;
const size_t size = num_pages * PageSize;
R_UNLESS(num_pages != 0, ResultInvalidSize);
R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
// Do the mapping.
[[maybe_unused]] const KMemoryPermission perm = range_pack.read_only.Value()
? KMemoryPermission::UserRead
: KMemoryPermission::UserReadWrite;
if (MapRangeSize{size_cap}.normal) {
// R_RETURN(page_table->MapStatic(phys_addr, size, perm));
} else {
// R_RETURN(page_table->MapIo(phys_addr, size, perm));
}
UNIMPLEMENTED();
R_SUCCEED();
}
Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
// Get/validate address/size
const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize;
const size_t num_pages = 1;
const size_t size = num_pages * PageSize;
R_UNLESS(num_pages != 0, ResultInvalidSize);
R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
// Do the mapping.
// R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
UNIMPLEMENTED();
R_SUCCEED();
}
template <typename F>
Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) {
// Define the allowed memory regions.
constexpr std::array<KMemoryRegionType, 4> MemoryRegions{
KMemoryRegionType_None,
KMemoryRegionType_KernelTraceBuffer,
KMemoryRegionType_OnMemoryBootImage,
KMemoryRegionType_DTB,
};
// Extract regions/read only.
const MapRegion pack{cap};
const std::array<RegionType, 3> types{pack.region0, pack.region1, pack.region2};
const std::array<u32, 3> ro{pack.read_only0, pack.read_only1, pack.read_only2};
for (size_t i = 0; i < types.size(); i++) {
const auto type = types[i];
const auto perm = ro[i] ? KMemoryPermission::UserRead : KMemoryPermission::UserReadWrite;
switch (type) {
case RegionType::NoMapping:
break;
case RegionType::KernelTraceBuffer:
case RegionType::OnMemoryBootImage:
case RegionType::DTB:
R_TRY(f(MemoryRegions[static_cast<u32>(type)], perm));
break;
default:
R_THROW(ResultNotFound);
}
}
R_SUCCEED();
}
Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) {
// Map each region into the process's page table.
R_RETURN(ProcessMapRegionCapability(
cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
// R_RETURN(page_table->MapRegion(region_type, perm));
UNIMPLEMENTED();
R_SUCCEED();
}));
}
Result KCapabilities::CheckMapRegion(KernelCore& kernel, const u32 cap) {
// Check that each region has a physical backing store.
R_RETURN(ProcessMapRegionCapability(
cap, [&](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
R_UNLESS(kernel.MemoryLayout().GetPhysicalMemoryRegionTree().FindFirstDerived(
region_type) != nullptr,
ResultOutOfRange);
R_SUCCEED();
}));
}
Result KCapabilities::SetInterruptPairCapability(const u32 cap) {
// Extract interrupts.
const InterruptPair pack{cap};
const std::array<u32, 2> ids{pack.interrupt_id0, pack.interrupt_id1};
for (size_t i = 0; i < ids.size(); i++) {
if (ids[i] != PaddingInterruptId) {
UNIMPLEMENTED();
// R_UNLESS(Kernel::GetInterruptManager().IsInterruptDefined(ids[i]), ResultOutOfRange);
// R_UNLESS(this->SetInterruptPermitted(ids[i]), ResultOutOfRange);
}
}
R_SUCCEED();
}
Result KCapabilities::SetProgramTypeCapability(const u32 cap) {
// Validate.
const ProgramType pack{cap};
R_UNLESS(pack.reserved == 0, ResultReservedUsed);
m_program_type = pack.type;
R_SUCCEED();
}
Result KCapabilities::SetKernelVersionCapability(const u32 cap) {
// Ensure we haven't set our version before.
R_UNLESS(KernelVersion{m_intended_kernel_version}.major_version == 0, ResultInvalidArgument);
// Set, ensure that we set a valid version.
m_intended_kernel_version = cap;
R_UNLESS(KernelVersion{m_intended_kernel_version}.major_version != 0, ResultInvalidArgument);
R_SUCCEED();
}
Result KCapabilities::SetHandleTableCapability(const u32 cap) {
// Validate.
const HandleTable pack{cap};
R_UNLESS(pack.reserved == 0, ResultReservedUsed);
m_handle_table_size = pack.size;
R_SUCCEED();
}
Result KCapabilities::SetDebugFlagsCapability(const u32 cap) {
// Validate.
const DebugFlags pack{cap};
R_UNLESS(pack.reserved == 0, ResultReservedUsed);
DebugFlags debug_capabilities{m_debug_capabilities};
debug_capabilities.allow_debug.Assign(pack.allow_debug);
debug_capabilities.force_debug.Assign(pack.force_debug);
m_debug_capabilities = debug_capabilities.raw;
R_SUCCEED();
}
Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
KPageTable* page_table) {
// Validate this is a capability we can act on.
const auto type = GetCapabilityType(cap);
R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument);
// If the type is padding, we have no work to do.
R_SUCCEED_IF(type == CapabilityType::Padding);
// Check that we haven't already processed this capability.
const auto flag = GetCapabilityFlag(type);
R_UNLESS(((set_flags & InitializeOnceFlags) & flag) == 0, ResultInvalidCombination);
set_flags |= flag;
// Process the capability.
switch (type) {
case CapabilityType::CorePriority:
R_RETURN(this->SetCorePriorityCapability(cap));
case CapabilityType::SyscallMask:
R_RETURN(this->SetSyscallMaskCapability(cap, set_svc));
case CapabilityType::MapIoPage:
R_RETURN(this->MapIoPage_(cap, page_table));
case CapabilityType::MapRegion:
R_RETURN(this->MapRegion_(cap, page_table));
case CapabilityType::InterruptPair:
R_RETURN(this->SetInterruptPairCapability(cap));
case CapabilityType::ProgramType:
R_RETURN(this->SetProgramTypeCapability(cap));
case CapabilityType::KernelVersion:
R_RETURN(this->SetKernelVersionCapability(cap));
case CapabilityType::HandleTable:
R_RETURN(this->SetHandleTableCapability(cap));
case CapabilityType::DebugFlags:
R_RETURN(this->SetDebugFlagsCapability(cap));
default:
R_THROW(ResultInvalidArgument);
}
}
Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* page_table) {
u32 set_flags = 0, set_svc = 0;
for (size_t i = 0; i < caps.size(); i++) {
const u32 cap{caps[i]};
if (GetCapabilityType(cap) == CapabilityType::MapRange) {
// Check that the pair cap exists.
R_UNLESS((++i) < caps.size(), ResultInvalidCombination);
// Check the pair cap is a map range cap.
const u32 size_cap{caps[i]};
R_UNLESS(GetCapabilityType(size_cap) == CapabilityType::MapRange,
ResultInvalidCombination);
// Map the range.
R_TRY(this->MapRange_(cap, size_cap, page_table));
} else {
R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table));
}
}
R_SUCCEED();
}
Result KCapabilities::CheckCapabilities(KernelCore& kernel, std::span<const u32> caps) {
for (auto cap : caps) {
// Check the capability refers to a valid region.
if (GetCapabilityType(cap) == CapabilityType::MapRegion) {
R_TRY(CheckMapRegion(kernel, cap));
}
}
R_SUCCEED();
}
} // namespace Kernel
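
As a hedged sketch of the 32-bit descriptor words consumed by SetCapabilities, here is how a CorePriority capability could be packed by hand; the field layout follows the unions in the header below, and the concrete cores and priorities are illustrative:

// Cores 0-3, thread priorities 28 (highest) through 63 (lowest).
constexpr u32 MakeCorePriorityCap() {
    u32 cap = (1u << 3) - 1; // id: three trailing one-bits mark CorePriority
    cap |= 63u << 4;         // lowest_thread_priority
    cap |= 28u << 10;        // highest_thread_priority
    cap |= 0u << 16;         // minimum_core_id
    cap |= 3u << 24;         // maximum_core_id
    return cap;
}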

View File

@ -0,0 +1,295 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <bitset>
#include <span>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
namespace Kernel {
class KPageTable;
class KernelCore;
class KCapabilities {
public:
constexpr explicit KCapabilities() = default;
Result InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table);
Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);
static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);
constexpr u64 GetCoreMask() const {
return m_core_mask;
}
constexpr u64 GetPhysicalCoreMask() const {
return m_phys_core_mask;
}
constexpr u64 GetPriorityMask() const {
return m_priority_mask;
}
constexpr s32 GetHandleTableSize() const {
return m_handle_table_size;
}
constexpr const Svc::SvcAccessFlagSet& GetSvcPermissions() const {
return m_svc_access_flags;
}
constexpr bool IsPermittedSvc(u32 id) const {
return (id < m_svc_access_flags.size()) && m_svc_access_flags[id];
}
constexpr bool IsPermittedInterrupt(u32 id) const {
return (id < m_irq_access_flags.size()) && m_irq_access_flags[id];
}
constexpr bool IsPermittedDebug() const {
return DebugFlags{m_debug_capabilities}.allow_debug.Value() != 0;
}
constexpr bool CanForceDebug() const {
return DebugFlags{m_debug_capabilities}.force_debug.Value() != 0;
}
constexpr u32 GetIntendedKernelMajorVersion() const {
return KernelVersion{m_intended_kernel_version}.major_version;
}
constexpr u32 GetIntendedKernelMinorVersion() const {
return KernelVersion{m_intended_kernel_version}.minor_version;
}
private:
static constexpr size_t InterruptIdCount = 0x400;
using InterruptFlagSet = std::bitset<InterruptIdCount>;
enum class CapabilityType : u32 {
CorePriority = (1U << 3) - 1,
SyscallMask = (1U << 4) - 1,
MapRange = (1U << 6) - 1,
MapIoPage = (1U << 7) - 1,
MapRegion = (1U << 10) - 1,
InterruptPair = (1U << 11) - 1,
ProgramType = (1U << 13) - 1,
KernelVersion = (1U << 14) - 1,
HandleTable = (1U << 15) - 1,
DebugFlags = (1U << 16) - 1,
Invalid = 0U,
Padding = ~0U,
};
using RawCapabilityValue = u32;
static constexpr CapabilityType GetCapabilityType(const RawCapabilityValue value) {
return static_cast<CapabilityType>((~value & (value + 1)) - 1);
}
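// Worked example: the capability type is encoded by the run of trailing one-bits in
// the raw value. For value = 0b0111: value + 1 = 0b1000, so ~value & (value + 1) =
// 0b1000, and subtracting 1 yields 0b0111, i.e. CapabilityType::CorePriority.
// Fifteen trailing ones (0x7FFF) decode to HandleTable the same way.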
static constexpr u32 GetCapabilityFlag(CapabilityType type) {
return static_cast<u32>(type) + 1;
}
template <CapabilityType Type>
static constexpr inline u32 CapabilityFlag = static_cast<u32>(Type) + 1;
template <CapabilityType Type>
static constexpr inline u32 CapabilityId = std::countr_zero(CapabilityFlag<Type>);
union CorePriority {
static_assert(CapabilityId<CapabilityType::CorePriority> + 1 == 4);
RawCapabilityValue raw;
BitField<0, 4, CapabilityType> id;
BitField<4, 6, u32> lowest_thread_priority;
BitField<10, 6, u32> highest_thread_priority;
BitField<16, 8, u32> minimum_core_id;
BitField<24, 8, u32> maximum_core_id;
};
union SyscallMask {
static_assert(CapabilityId<CapabilityType::SyscallMask> + 1 == 5);
RawCapabilityValue raw;
BitField<0, 5, CapabilityType> id;
BitField<5, 24, u32> mask;
BitField<29, 3, u32> index;
};
// #undef MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES
static constexpr u64 PhysicalMapAllowedMask = (1ULL << 36) - 1;
union MapRange {
static_assert(CapabilityId<CapabilityType::MapRange> + 1 == 7);
RawCapabilityValue raw;
BitField<0, 7, CapabilityType> id;
BitField<7, 24, u32> address;
BitField<31, 1, u32> read_only;
};
union MapRangeSize {
static_assert(CapabilityId<CapabilityType::MapRange> + 1 == 7);
RawCapabilityValue raw;
BitField<0, 7, CapabilityType> id;
BitField<7, 20, u32> pages;
BitField<27, 4, u32> reserved;
BitField<31, 1, u32> normal;
};
union MapIoPage {
static_assert(CapabilityId<CapabilityType::MapIoPage> + 1 == 8);
RawCapabilityValue raw;
BitField<0, 8, CapabilityType> id;
BitField<8, 24, u32> address;
};
enum class RegionType : u32 {
NoMapping = 0,
KernelTraceBuffer = 1,
OnMemoryBootImage = 2,
DTB = 3,
};
union MapRegion {
static_assert(CapabilityId<CapabilityType::MapRegion> + 1 == 11);
RawCapabilityValue raw;
BitField<0, 11, CapabilityType> id;
BitField<11, 6, RegionType> region0;
BitField<17, 1, u32> read_only0;
BitField<18, 6, RegionType> region1;
BitField<24, 1, u32> read_only1;
BitField<25, 6, RegionType> region2;
BitField<31, 1, u32> read_only2;
};
union InterruptPair {
static_assert(CapabilityId<CapabilityType::InterruptPair> + 1 == 12);
RawCapabilityValue raw;
BitField<0, 12, CapabilityType> id;
BitField<12, 10, u32> interrupt_id0;
BitField<22, 10, u32> interrupt_id1;
};
union ProgramType {
static_assert(CapabilityId<CapabilityType::ProgramType> + 1 == 14);
RawCapabilityValue raw;
BitField<0, 14, CapabilityType> id;
BitField<14, 3, u32> type;
BitField<17, 15, u32> reserved;
};
union KernelVersion {
static_assert(CapabilityId<CapabilityType::KernelVersion> + 1 == 15);
RawCapabilityValue raw;
BitField<0, 15, CapabilityType> id;
BitField<15, 4, u32> major_version;
BitField<19, 13, u32> minor_version;
};
union HandleTable {
static_assert(CapabilityId<CapabilityType::HandleTable> + 1 == 16);
RawCapabilityValue raw;
BitField<0, 16, CapabilityType> id;
BitField<16, 10, u32> size;
BitField<26, 6, u32> reserved;
};
union DebugFlags {
static_assert(CapabilityId<CapabilityType::DebugFlags> + 1 == 17);
RawCapabilityValue raw;
BitField<0, 17, CapabilityType> id;
BitField<17, 1, u32> allow_debug;
BitField<18, 1, u32> force_debug;
BitField<19, 13, u32> reserved;
};
static_assert(sizeof(CorePriority) == 4);
static_assert(sizeof(SyscallMask) == 4);
static_assert(sizeof(MapRange) == 4);
static_assert(sizeof(MapRangeSize) == 4);
static_assert(sizeof(MapIoPage) == 4);
static_assert(sizeof(MapRegion) == 4);
static_assert(sizeof(InterruptPair) == 4);
static_assert(sizeof(ProgramType) == 4);
static_assert(sizeof(KernelVersion) == 4);
static_assert(sizeof(HandleTable) == 4);
static_assert(sizeof(DebugFlags) == 4);
static constexpr u32 InitializeOnceFlags =
CapabilityFlag<CapabilityType::CorePriority> | CapabilityFlag<CapabilityType::ProgramType> |
CapabilityFlag<CapabilityType::KernelVersion> |
CapabilityFlag<CapabilityType::HandleTable> | CapabilityFlag<CapabilityType::DebugFlags>;
static const u32 PaddingInterruptId = 0x3FF;
static_assert(PaddingInterruptId < InterruptIdCount);
private:
constexpr bool SetSvcAllowed(u32 id) {
if (id < m_svc_access_flags.size()) [[likely]] {
m_svc_access_flags[id] = true;
return true;
} else {
return false;
}
}
constexpr bool SetInterruptPermitted(u32 id) {
if (id < m_irq_access_flags.size()) [[likely]] {
m_irq_access_flags[id] = true;
return true;
} else {
return false;
}
}
Result SetCorePriorityCapability(const u32 cap);
Result SetSyscallMaskCapability(const u32 cap, u32& set_svc);
Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table);
Result MapIoPage_(const u32 cap, KPageTable* page_table);
Result MapRegion_(const u32 cap, KPageTable* page_table);
Result SetInterruptPairCapability(const u32 cap);
Result SetProgramTypeCapability(const u32 cap);
Result SetKernelVersionCapability(const u32 cap);
Result SetHandleTableCapability(const u32 cap);
Result SetDebugFlagsCapability(const u32 cap);
template <typename F>
static Result ProcessMapRegionCapability(const u32 cap, F f);
static Result CheckMapRegion(KernelCore& kernel, const u32 cap);
Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table);
Result SetCapabilities(std::span<const u32> caps, KPageTable* page_table);
private:
Svc::SvcAccessFlagSet m_svc_access_flags{};
InterruptFlagSet m_irq_access_flags{};
u64 m_core_mask{};
u64 m_phys_core_mask{};
u64 m_priority_mask{};
u32 m_debug_capabilities{};
s32 m_handle_table_size{};
u32 m_intended_kernel_version{};
u32 m_program_type{};
};
} // namespace Kernel

View File

@ -74,7 +74,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
R_UNLESS(!m_is_mapped, ResultInvalidState);
// Map the memory.
R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
R_TRY(kernel.CurrentProcess()->PageTable().MapPageGroup(
address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
// Mark ourselves as mapped.
@ -91,8 +91,8 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
KScopedLightLock lk(m_lock);
// Unmap the memory.
R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
KMemoryState::CodeOut));
R_TRY(kernel.CurrentProcess()->PageTable().UnmapPageGroup(address, *m_page_group,
KMemoryState::CodeOut));
// Mark ourselves as unmapped.
m_is_mapped = false;
@ -125,8 +125,8 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
}
// Map the memory.
R_TRY(
m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
R_TRY(m_owner->PageTable().MapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode,
k_perm));
// Mark ourselves as mapped.
m_is_owner_mapped = true;
@ -142,7 +142,7 @@ Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
KScopedLightLock lk(m_lock);
// Unmap the memory.
R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
R_TRY(m_owner->PageTable().UnmapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode));
// Mark ourselves as unmapped.
m_is_owner_mapped = false;

View File

@ -171,7 +171,7 @@ Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value)
R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);
// Update the lock.
cur_thread->SetAddressKey(addr, value);
cur_thread->SetUserAddressKey(addr, value);
owner_thread->AddWaiter(cur_thread);
// Begin waiting.

View File

@ -0,0 +1,150 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/k_device_address_space.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer(kernel_), m_lock(kernel_), m_is_initialized(false) {}
KDeviceAddressSpace::~KDeviceAddressSpace() = default;
void KDeviceAddressSpace::Initialize() {
// This just forwards to the device page table manager.
// KDevicePageTable::Initialize();
}
// Member functions.
Result KDeviceAddressSpace::Initialize(u64 address, u64 size) {
// Initialize the device page table.
// R_TRY(m_table.Initialize(address, size));
// Set member variables.
m_space_address = address;
m_space_size = size;
m_is_initialized = true;
R_SUCCEED();
}
void KDeviceAddressSpace::Finalize() {
// Finalize the table.
// m_table.Finalize();
}
Result KDeviceAddressSpace::Attach(Svc::DeviceName device_name) {
// Lock the address space.
KScopedLightLock lk(m_lock);
// Attach.
// R_RETURN(m_table.Attach(device_name, m_space_address, m_space_size));
R_SUCCEED();
}
Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
// Lock the address space.
KScopedLightLock lk(m_lock);
// Detach.
// R_RETURN(m_table.Detach(device_name));
R_SUCCEED();
}
Result KDeviceAddressSpace::Map(KPageTable* page_table, VAddr process_address, size_t size,
u64 device_address, u32 option, bool is_aligned) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
device_address + size - 1 <= m_space_address + m_space_size - 1),
ResultInvalidCurrentMemory);
// Decode the option.
const Svc::MapDeviceAddressSpaceOption option_pack{option};
const auto device_perm = option_pack.permission.Value();
const auto flags = option_pack.flags.Value();
const auto reserved = option_pack.reserved.Value();
// Validate the option.
// TODO: It is likely that this check for flags == none is only performed on the NX board.
R_UNLESS(flags == Svc::MapDeviceAddressSpaceFlag::None, ResultInvalidEnumValue);
R_UNLESS(reserved == 0, ResultInvalidEnumValue);
// Lock the address space.
KScopedLightLock lk(m_lock);
// Lock the page table to prevent concurrent device mapping operations.
// KScopedLightLock pt_lk = page_table->AcquireDeviceMapLock();
// Lock the pages.
bool is_io{};
R_TRY(page_table->LockForMapDeviceAddressSpace(std::addressof(is_io), process_address, size,
ConvertToKMemoryPermission(device_perm),
is_aligned, true));
// Ensure that if we fail, we don't keep unmapped pages locked.
ON_RESULT_FAILURE {
ASSERT(page_table->UnlockForDeviceAddressSpace(process_address, size) == ResultSuccess);
};
// Check that the io status is allowable.
if (is_io) {
R_UNLESS(static_cast<u32>(flags & Svc::MapDeviceAddressSpaceFlag::NotIoRegister) == 0,
ResultInvalidCombination);
}
// Map the pages.
{
// Perform the mapping.
// R_TRY(m_table.Map(page_table, process_address, size, device_address, device_perm,
// is_aligned, is_io));
// Ensure that we unmap the pages if we fail to update the protections.
// NOTE: Nintendo does not check the result of this unmap call.
// ON_RESULT_FAILURE { m_table.Unmap(device_address, size); };
// Update the protections in accordance with how much we mapped.
// R_TRY(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size));
}
// We succeeded.
R_SUCCEED();
}
Result KDeviceAddressSpace::Unmap(KPageTable* page_table, VAddr process_address, size_t size,
u64 device_address) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
device_address + size - 1 <= m_space_address + m_space_size - 1),
ResultInvalidCurrentMemory);
// Lock the address space.
KScopedLightLock lk(m_lock);
// Lock the page table to prevent concurrent device mapping operations.
// KScopedLightLock pt_lk = page_table->AcquireDeviceMapLock();
// Lock the pages.
R_TRY(page_table->LockForUnmapDeviceAddressSpace(process_address, size, true));
// Unmap the pages.
{
// If we fail to unmap, we want to do a partial unlock.
// ON_RESULT_FAILURE {
// ASSERT(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size) ==
// ResultSuccess);
// };
// Perform the unmap.
// R_TRY(m_table.Unmap(page_table, process_address, size, device_address));
}
// Unlock the pages.
ASSERT(page_table->UnlockForDeviceAddressSpace(process_address, size) == ResultSuccess);
R_SUCCEED();
}
} // namespace Kernel

View File

@ -0,0 +1,60 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <string>
#include "common/common_types.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KDeviceAddressSpace final
: public KAutoObjectWithSlabHeapAndContainer<KDeviceAddressSpace, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KDeviceAddressSpace, KAutoObject);
public:
explicit KDeviceAddressSpace(KernelCore& kernel);
~KDeviceAddressSpace();
Result Initialize(u64 address, u64 size);
void Finalize();
bool IsInitialized() const {
return m_is_initialized;
}
static void PostDestroy(uintptr_t arg) {}
Result Attach(Svc::DeviceName device_name);
Result Detach(Svc::DeviceName device_name);
Result MapByForce(KPageTable* page_table, VAddr process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
}
Result MapAligned(KPageTable* page_table, VAddr process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
}
Result Unmap(KPageTable* page_table, VAddr process_address, size_t size, u64 device_address);
static void Initialize();
private:
Result Map(KPageTable* page_table, VAddr process_address, size_t size, u64 device_address,
u32 option, bool is_aligned);
private:
KLightLock m_lock;
// KDevicePageTable m_table;
u64 m_space_address{};
u64 m_space_size{};
bool m_is_initialized{};
};
} // namespace Kernel

View File

@ -68,7 +68,7 @@ bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
// Add the current thread as a waiter on the owner.
KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
owner_thread->AddWaiter(cur_thread);
// Begin waiting to hold the lock.

View File

@ -67,9 +67,9 @@ constexpr size_t KernelPageBufferAdditionalSize = 0x33C000;
constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize +
KernelSlabHeapSize + KernelPageBufferHeapSize;
constexpr bool IsKernelAddressKey(VAddr key) {
return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
}
//! NB: Use KThread::GetAddressKeyIsKernel().
//! See the explanation of the deviation in GetAddressKey.
bool IsKernelAddressKey(VAddr key) = delete;
constexpr bool IsKernelAddress(VAddr address) {
return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;

View File

@ -435,6 +435,9 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
KPageGroup pg{m_kernel, m_block_info_manager};
AddRegionToPages(src_address, num_pages, pg);
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(this);
// Reprotect the source as kernel-read/not mapped.
const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
KMemoryPermission::NotMapped);
@ -447,7 +450,10 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
});
// Map the alias pages.
R_TRY(MapPages(dst_address, pg, new_perm));
const KPageProperties dst_properties = {new_perm, false, false,
DisableMergeAttribute::DisableHead};
R_TRY(
this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
// We successfully mapped the alias pages, so we don't need to unprotect the src pages on
// failure.
@ -1881,7 +1887,8 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
R_SUCCEED();
}
Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
size_t size) {
// Lock the table.
KScopedLightLock lk(m_general_lock);
@ -1902,53 +1909,73 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
KMemoryAttribute::None));
// Create an update allocator for the source.
Result src_allocator_result{ResultSuccess};
Result src_allocator_result;
KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
m_memory_block_slab_manager,
num_src_allocator_blocks);
R_TRY(src_allocator_result);
// Create an update allocator for the destination.
Result dst_allocator_result{ResultSuccess};
Result dst_allocator_result;
KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
m_memory_block_slab_manager,
num_dst_allocator_blocks);
R_TRY(dst_allocator_result);
// Map the memory.
KPageGroup page_linked_list{m_kernel, m_block_info_manager};
const size_t num_pages{size / PageSize};
const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
AddRegionToPages(src_address, num_pages, page_linked_list);
{
// Determine the number of pages being operated on.
const size_t num_pages = size / PageSize;
// Create a page group for the memory being mapped.
KPageGroup pg{m_kernel, m_block_info_manager};
// Create the page group representing the source.
R_TRY(this->MakePageGroup(pg, src_address, num_pages));
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(this);
// Reprotect the source as kernel-read/not mapped.
auto block_guard = detail::ScopeExit([&] {
Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
OperationType::ChangePermissions);
});
R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions));
R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite));
const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
const KPageProperties src_properties = {new_src_perm, false, false,
DisableMergeAttribute::DisableHeadBodyTail};
R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
OperationType::ChangePermissions));
block_guard.Cancel();
// Ensure that we unprotect the source pages on failure.
ON_RESULT_FAILURE {
const KPageProperties unprotect_properties = {
KMemoryPermission::UserReadWrite, false, false,
DisableMergeAttribute::EnableHeadBodyTail};
ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm,
OperationType::ChangePermissions) == ResultSuccess);
};
// Map the alias pages.
const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
DisableMergeAttribute::DisableHead};
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
false));
// Apply the memory block updates.
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
src_state, new_src_perm, new_src_attr,
KMemoryBlockDisableMergeAttribute::Locked,
KMemoryBlockDisableMergeAttribute::None);
m_memory_block_manager.Update(
std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
}
// Apply the memory block updates.
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
new_src_perm, new_src_attr,
KMemoryBlockDisableMergeAttribute::Locked,
KMemoryBlockDisableMergeAttribute::None);
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
KMemoryState::Stack, KMemoryPermission::UserReadWrite,
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
KMemoryBlockDisableMergeAttribute::None);
R_SUCCEED();
}
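The map path above is transactional: reprotect the source, arm a rollback (the block_guard / ON_RESULT_FAILURE pairing), map the alias, and only then disarm the rollback and commit the block updates. A minimal standalone sketch of that guard idiom, using hypothetical names that are not part of this diff:
#include <cstdio>
#include <utility>
// Runs a cleanup function on scope exit unless dismissed, like the
// block_guard.Cancel() / ON_RESULT_FAILURE pattern above.
template <typename F>
class OnFailureGuard {
public:
    explicit OnFailureGuard(F f) : m_f(std::move(f)) {}
    ~OnFailureGuard() {
        if (!m_dismissed) {
            m_f();
        }
    }
    void Dismiss() { m_dismissed = true; }

private:
    F m_f;
    bool m_dismissed{};
};
bool MapAliasSketch(bool map_succeeds) {
    // Step 1: assume the source was reprotected successfully.
    OnFailureGuard unprotect([] { std::printf("rolling back source protection\n"); });
    // Step 2: map the alias pages; an early return triggers the rollback.
    if (!map_succeeds) {
        return false;
    }
    // Success: keep the new protection and commit.
    unprotect.Dismiss();
    return true;
}
int main() {
    MapAliasSketch(false); // prints the rollback message
    MapAliasSketch(true);  // silent
}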
Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) {
Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
size_t size) {
// Lock the table.
KScopedLightLock lk(m_general_lock);
@ -1970,108 +1997,208 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
// Create an update allocator for the source.
Result src_allocator_result{ResultSuccess};
Result src_allocator_result;
KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
m_memory_block_slab_manager,
num_src_allocator_blocks);
R_TRY(src_allocator_result);
// Create an update allocator for the destination.
Result dst_allocator_result{ResultSuccess};
Result dst_allocator_result;
KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
m_memory_block_slab_manager,
num_dst_allocator_blocks);
R_TRY(dst_allocator_result);
KPageGroup src_pages{m_kernel, m_block_info_manager};
KPageGroup dst_pages{m_kernel, m_block_info_manager};
const size_t num_pages{size / PageSize};
AddRegionToPages(src_address, num_pages, src_pages);
AddRegionToPages(dst_address, num_pages, dst_pages);
R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
// Unmap the memory.
{
auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
// Determine the number of pages being operated on.
const size_t num_pages = size / PageSize;
R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
OperationType::ChangePermissions));
// Create page groups for the memory being unmapped.
KPageGroup pg{m_kernel, m_block_info_manager};
block_guard.Cancel();
// Create the page group representing the destination.
R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
// Ensure the page group is valid for the source.
R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(this);
// Unmap the aliased copy of the pages.
const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
DisableMergeAttribute::None};
R_TRY(
this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap));
// Ensure that we re-map the aliased pages on failure.
ON_RESULT_FAILURE {
this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
};
// Try to set the permissions for the source pages back to what they should be.
const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
DisableMergeAttribute::EnableAndMergeHeadBodyTail};
R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
OperationType::ChangePermissions));
// Apply the memory block updates.
m_memory_block_manager.Update(
std::addressof(src_allocator), src_address, num_pages, src_state,
KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
m_memory_block_manager.Update(
std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
KMemoryPermission::None, KMemoryAttribute::None,
KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
}
// Apply the memory block updates.
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
KMemoryBlockDisableMergeAttribute::None,
KMemoryBlockDisableMergeAttribute::Locked);
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
KMemoryState::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
KMemoryBlockDisableMergeAttribute::Normal);
R_SUCCEED();
}
Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
KMemoryPermission perm) {
Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
size_t num_pages, KMemoryPermission perm) {
ASSERT(this->IsLockedByCurrentThread());
VAddr cur_addr{addr};
// Create a page group to hold the pages we allocate.
KPageGroup pg{m_kernel, m_block_info_manager};
for (const auto& node : page_linked_list) {
if (const auto result{
Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
result.IsError()) {
const size_t num_pages{(addr - cur_addr) / PageSize};
// Allocate the pages.
R_TRY(
m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
.IsSuccess());
// Ensure that the page group is closed when we're done working with it.
SCOPE_EXIT({ pg.Close(); });
R_RETURN(result);
}
cur_addr += node.GetNumPages() * PageSize;
// Clear all pages.
for (const auto& it : pg) {
std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
it.GetSize());
}
R_SUCCEED();
}
Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
KMemoryPermission perm) {
// Check that the map is in range.
const size_t num_pages{page_linked_list.GetNumPages()};
const size_t size{num_pages * PageSize};
R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
// Lock the table.
KScopedLightLock lk(m_general_lock);
// Check the memory state.
R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None));
// Create an update allocator.
Result allocator_result{ResultSuccess};
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager);
// Map the pages.
R_TRY(MapPages(address, page_linked_list, perm));
R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup));
}
// Update the blocks.
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
KMemoryBlockDisableMergeAttribute::None);
Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
const KPageGroup& pg, const KPageProperties properties,
bool reuse_ll) {
ASSERT(this->IsLockedByCurrentThread());
// Note the current address, so that we can iterate.
const KProcessAddress start_address = address;
KProcessAddress cur_address = address;
// Ensure that we clean up on failure.
ON_RESULT_FAILURE {
ASSERT(!reuse_ll);
if (cur_address != start_address) {
const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
DisableMergeAttribute::None};
ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize,
unmap_properties.perm, OperationType::Unmap) == ResultSuccess);
}
};
// Iterate, mapping all pages in the group.
for (const auto& block : pg) {
// Map and advance.
const KPageProperties cur_properties =
(cur_address == start_address)
? properties
: KPageProperties{properties.perm, properties.io, properties.uncached,
DisableMergeAttribute::None};
R_TRY(this->Operate(cur_address, block.GetNumPages(), cur_properties.perm, OperationType::Map,
block.GetAddress()));
cur_address += block.GetSize();
}
// We succeeded!
R_SUCCEED();
}
Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
bool is_pa_valid, VAddr region_start, size_t region_num_pages,
void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
const KPageGroup& pg) {
ASSERT(this->IsLockedByCurrentThread());
// Note the current address, so that we can iterate.
const KProcessAddress start_address = address;
const KProcessAddress last_address = start_address + size - 1;
const KProcessAddress end_address = last_address + 1;
// Iterate over the memory.
auto pg_it = pg.begin();
ASSERT(pg_it != pg.end());
KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
size_t pg_pages = pg_it->GetNumPages();
auto it = m_memory_block_manager.FindIterator(start_address);
while (true) {
// Check that the iterator is valid.
ASSERT(it != m_memory_block_manager.end());
// Get the memory info.
const KMemoryInfo info = it->GetMemoryInfo();
// Determine the range to map.
KProcessAddress map_address = std::max<VAddr>(info.GetAddress(), start_address);
const KProcessAddress map_end_address = std::min<VAddr>(info.GetEndAddress(), end_address);
ASSERT(map_end_address != map_address);
// Determine if we should disable head merge.
const bool disable_head_merge =
info.GetAddress() >= start_address &&
True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
const KPageProperties map_properties = {
info.GetPermission(), false, false,
disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
// While we have pages to map, map them.
size_t map_pages = (map_end_address - map_address) / PageSize;
while (map_pages > 0) {
// Check if we're at the end of the physical block.
if (pg_pages == 0) {
// Ensure there are more pages to map.
ASSERT(pg_it != pg.end());
// Advance our physical block.
++pg_it;
pg_phys_addr = pg_it->GetAddress();
pg_pages = pg_it->GetNumPages();
}
// Map whatever we can.
const size_t cur_pages = std::min(pg_pages, map_pages);
ASSERT(this->Operate(map_address, cur_pages, map_properties.perm, OperationType::Map,
pg_phys_addr) == ResultSuccess);
// Advance.
map_address += cur_pages * PageSize;
map_pages -= cur_pages;
pg_phys_addr += cur_pages * PageSize;
pg_pages -= cur_pages;
}
// Check if we're done.
if (last_address <= info.GetLastAddress()) {
break;
}
// Advance.
++it;
}
// Check that we re-mapped precisely the page group.
ASSERT((++pg_it) == pg.end());
}
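RemapPageGroup advances two cursors in lockstep: the block-manager iterator supplies virtual ranges, the page-group iterator supplies physical extents, and each inner step maps min(pg_pages, map_pages) pages before advancing both sides. A standalone sketch of that chunking, with made-up addresses and types:
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>
struct Extent {
    unsigned long phys;
    size_t pages;
};
int main() {
    constexpr size_t PageSize = 0x1000;
    const std::vector<Extent> group = {{0x80000, 3}, {0x90000, 2}}; // physical blocks
    const std::vector<size_t> ranges = {2, 3}; // virtual run lengths, in pages
    size_t gi = 0;
    unsigned long phys = group[gi].phys;
    size_t pg_pages = group[gi].pages;
    unsigned long vaddr = 0x10000;
    for (size_t map_pages : ranges) {
        while (map_pages > 0) {
            if (pg_pages == 0) { // advance to the next physical extent
                ++gi;
                phys = group[gi].phys;
                pg_pages = group[gi].pages;
            }
            // Map whatever overlaps: never more than either cursor has left.
            const size_t cur = std::min(pg_pages, map_pages);
            std::printf("map va=%#lx -> pa=%#lx (%zu pages)\n", vaddr, phys, cur);
            vaddr += cur * PageSize;
            phys += cur * PageSize;
            map_pages -= cur;
            pg_pages -= cur;
        }
    }
}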
Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
KPhysicalAddress phys_addr, bool is_pa_valid,
KProcessAddress region_start, size_t region_num_pages,
KMemoryState state, KMemoryPermission perm) {
ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
@ -2084,26 +2211,30 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
KScopedLightLock lk(m_general_lock);
// Find a random address to map at.
VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
this->GetNumGuardPages());
KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
0, this->GetNumGuardPages());
R_UNLESS(addr != 0, ResultOutOfMemory);
ASSERT(Common::IsAligned(addr, alignment));
ASSERT(this->CanContain(addr, num_pages * PageSize, state));
ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None)
.IsSuccess());
KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
// Create an update allocator.
Result allocator_result{ResultSuccess};
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager);
R_TRY(allocator_result);
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(this);
// Perform mapping operation.
if (is_pa_valid) {
R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr));
} else {
UNIMPLEMENTED();
R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
}
// Update the blocks.
@ -2116,28 +2247,45 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
R_SUCCEED();
}
Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
ASSERT(this->IsLockedByCurrentThread());
Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
KMemoryPermission perm) {
// Check that the map is in range.
const size_t size = num_pages * PageSize;
R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
VAddr cur_addr{addr};
// Lock the table.
KScopedLightLock lk(m_general_lock);
for (const auto& node : page_linked_list) {
if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
OperationType::Unmap)};
result.IsError()) {
R_RETURN(result);
}
// Check the memory state.
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::None,
KMemoryAttribute::None));
cur_addr += node.GetNumPages() * PageSize;
}
// Create an update allocator.
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(this);
// Map the pages.
R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
// Update the blocks.
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
KMemoryBlockDisableMergeAttribute::None);
R_SUCCEED();
}
Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
// Check that the unmap is in range.
const size_t num_pages{page_linked_list.GetNumPages()};
const size_t size{num_pages * PageSize};
const size_t size = num_pages * PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Lock the table.
@ -2151,13 +2299,18 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
KMemoryAttribute::None));
// Create an update allocator.
Result allocator_result{ResultSuccess};
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(this);
// Perform the unmap.
R_TRY(UnmapPages(address, page_linked_list));
const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
DisableMergeAttribute::None};
R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap));
// Update the blocks.
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
@ -2168,29 +2321,130 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
R_SUCCEED();
}
Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) {
// Check that the unmap is in range.
const size_t size = num_pages * PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
KProcessAddress region_start, size_t region_num_pages,
KMemoryState state, KMemoryPermission perm) {
ASSERT(!this->IsLockedByCurrentThread());
// Ensure this is a valid map request.
const size_t num_pages = pg.GetNumPages();
R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
ResultInvalidCurrentMemory);
R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
// Lock the table.
KScopedLightLock lk(m_general_lock);
// Check the memory state.
size_t num_allocator_blocks{};
// Find a random address to map at.
KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
0, this->GetNumGuardPages());
R_UNLESS(addr != 0, ResultOutOfMemory);
ASSERT(this->CanContain(addr, num_pages * PageSize, state));
ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
// Create an update allocator.
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager);
R_TRY(allocator_result);
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(this);
// Perform mapping operation.
const KPageProperties properties = {perm, state == KMemoryState::Io, false,
DisableMergeAttribute::DisableHead};
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
// Update the blocks.
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
KMemoryBlockDisableMergeAttribute::None);
// We successfully mapped the pages.
*out_addr = addr;
R_SUCCEED();
}
Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
KMemoryPermission perm) {
ASSERT(!this->IsLockedByCurrentThread());
// Ensure this is a valid map request.
const size_t num_pages = pg.GetNumPages();
const size_t size = num_pages * PageSize;
R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
// Lock the table.
KScopedLightLock lk(m_general_lock);
// Check if state allows us to map.
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::None,
KMemoryAttribute::None));
// Create an update allocator.
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(this);
// Perform mapping operation.
const KPageProperties properties = {perm, state == KMemoryState::Io, false,
DisableMergeAttribute::DisableHead};
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
// Update the blocks.
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
KMemoryBlockDisableMergeAttribute::None);
// We successfully mapped the pages.
R_SUCCEED();
}
Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
KMemoryState state) {
ASSERT(!this->IsLockedByCurrentThread());
// Ensure this is a valid unmap request.
const size_t num_pages = pg.GetNumPages();
const size_t size = num_pages * PageSize;
R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
// Lock the table.
KScopedLightLock lk(m_general_lock);
// Check if state allows us to unmap.
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
KMemoryState::All, state, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::None));
// Check that the page group is valid.
R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
// Create an update allocator.
Result allocator_result{ResultSuccess};
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
// Perform the unmap.
R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(this);
// Perform unmapping operation.
const KPageProperties properties = {KMemoryPermission::None, false, false,
DisableMergeAttribute::None};
R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap));
// Update the blocks.
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
@ -2550,54 +2804,6 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
}
}
ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align,
bool is_map_only, VAddr region_start,
size_t region_num_pages, KMemoryState state,
KMemoryPermission perm, PAddr map_addr) {
KScopedLightLock lk(m_general_lock);
R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state),
ResultInvalidCurrentMemory);
R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory);
const VAddr addr{
AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
R_UNLESS(addr, ResultOutOfMemory);
// Create an update allocator.
Result allocator_result{ResultSuccess};
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager);
if (is_map_only) {
R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
} else {
// Create a page group to hold the pages we allocate.
KPageGroup pg{m_kernel, m_block_info_manager};
R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
&pg, needed_num_pages,
KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
// Ensure that the page group is closed when we're done working with it.
SCOPE_EXIT({ pg.Close(); });
// Clear all pages.
for (const auto& it : pg) {
std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
m_heap_fill_value, it.GetSize());
}
R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
}
// Update the blocks.
m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
KMemoryBlockDisableMergeAttribute::None);
return addr;
}
Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
KMemoryPermission perm, bool is_aligned,
bool check_heap) {

View File

@ -24,12 +24,36 @@ class System;
namespace Kernel {
enum class DisableMergeAttribute : u8 {
None = (0U << 0),
DisableHead = (1U << 0),
DisableHeadAndBody = (1U << 1),
EnableHeadAndBody = (1U << 2),
DisableTail = (1U << 3),
EnableTail = (1U << 4),
EnableAndMergeHeadBodyTail = (1U << 5),
EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
};
struct KPageProperties {
KMemoryPermission perm;
bool io;
bool uncached;
DisableMergeAttribute disable_merge_attributes;
};
static_assert(std::is_trivial_v<KPageProperties>);
static_assert(sizeof(KPageProperties) == sizeof(u32));
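The two static_asserts pin down the contract the diff relies on: four one-byte fields that stay trivially copyable and pack into a single 32-bit word, so properties can be passed around by value. A standalone sketch of the same layout check, using stand-in enums rather than yuzu's types:
#include <cstdint>
#include <type_traits>
enum class Perm : uint8_t { None = 0, UserReadWrite = 3 };
enum class MergeAttr : uint8_t { None = 0, DisableHead = 1 };
struct PageProps {
    Perm perm;       // 1 byte
    bool io;         // 1 byte
    bool uncached;   // 1 byte
    MergeAttr merge; // 1 byte
};
static_assert(std::is_trivial_v<PageProps>);
static_assert(sizeof(PageProps) == sizeof(uint32_t)); // no padding: 4 x 1 byte
int main() {
    constexpr PageProps props{Perm::UserReadWrite, false, false, MergeAttr::DisableHead};
    return props.io ? 1 : 0;
}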
class KBlockInfoManager;
class KMemoryBlockManager;
class KResourceLimit;
class KSystemResource;
class KPageTable final {
protected:
struct PageLinkedList;
public:
enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
@ -57,27 +81,12 @@ public:
Result UnmapPhysicalMemory(VAddr addr, size_t size);
Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
KMemoryPermission perm);
Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
KMemoryState state, KMemoryPermission perm) {
R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
this->GetRegionAddress(state),
this->GetRegionSize(state) / PageSize, state, perm));
}
Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
KMemoryInfo QueryInfo(VAddr addr);
Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
Result SetMaxHeapSize(size_t size);
Result SetHeapSize(VAddr* out, size_t size);
ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
VAddr region_start, size_t region_num_pages,
KMemoryState state, KMemoryPermission perm,
PAddr map_addr = 0);
Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
KMemoryPermission perm, bool is_aligned, bool check_heap);
Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);
@ -113,6 +122,40 @@ public:
bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
KPhysicalAddress phys_addr, KProcessAddress region_start,
size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
region_num_pages, state, perm));
}
Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
this->GetRegionAddress(state),
this->GetRegionSize(state) / PageSize, state, perm));
}
Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
KMemoryPermission perm) {
R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
this->GetRegionAddress(state),
this->GetRegionSize(state) / PageSize, state, perm));
}
Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
KMemoryPermission perm);
Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
KMemoryPermission perm);
Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
KMemoryPermission perm);
Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
const KPageGroup& pg);
protected:
struct PageLinkedList {
private:
@ -166,11 +209,9 @@ private:
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
bool is_pa_valid, VAddr region_start, size_t region_num_pages,
KMemoryState state, KMemoryPermission perm);
Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
bool IsRegionContiguous(VAddr addr, u64 size) const;
void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
KMemoryInfo QueryInfoImpl(VAddr addr);
@ -265,6 +306,11 @@ private:
void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
size_t size, KMemoryPermission prot_perm);
Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
size_t num_pages, KMemoryPermission perm);
Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
mutable KLightLock m_general_lock;
mutable KLightLock m_map_physical_memory_lock;

View File

@ -17,35 +17,41 @@ namespace Kernel {
class KThread;
template <typename T>
concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
{ t.GetAffinityMask() } -> Common::ConvertibleTo<u64>;
{t.SetAffinityMask(0)};
concept KPriorityQueueAffinityMask = !
std::is_reference_v<T>&& requires(T& t) {
{ t.GetAffinityMask() } -> Common::ConvertibleTo<u64>;
{ t.SetAffinityMask(0) };
{ t.GetAffinity(0) } -> std::same_as<bool>;
{t.SetAffinity(0, false)};
{t.SetAll()};
};
{ t.GetAffinity(0) } -> std::same_as<bool>;
{ t.SetAffinity(0, false) };
{ t.SetAll() };
};
template <typename T>
concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
{typename T::QueueEntry()};
{(typename T::QueueEntry()).Initialize()};
{(typename T::QueueEntry()).SetPrev(std::addressof(t))};
{(typename T::QueueEntry()).SetNext(std::addressof(t))};
{ (typename T::QueueEntry()).GetNext() } -> std::same_as<T*>;
{ (typename T::QueueEntry()).GetPrev() } -> std::same_as<T*>;
{ t.GetPriorityQueueEntry(0) } -> std::same_as<typename T::QueueEntry&>;
concept KPriorityQueueMember = !
std::is_reference_v<T>&& requires(T& t) {
{ typename T::QueueEntry() };
{ (typename T::QueueEntry()).Initialize() };
{ (typename T::QueueEntry()).SetPrev(std::addressof(t)) };
{ (typename T::QueueEntry()).SetNext(std::addressof(t)) };
{ (typename T::QueueEntry()).GetNext() } -> std::same_as<T*>;
{ (typename T::QueueEntry()).GetPrev() } -> std::same_as<T*>;
{
t.GetPriorityQueueEntry(0)
} -> std::same_as<typename T::QueueEntry&>;
{t.GetAffinityMask()};
{ std::remove_cvref_t<decltype(t.GetAffinityMask())>() } -> KPriorityQueueAffinityMask;
{ t.GetAffinityMask() };
{
std::remove_cvref_t<decltype(t.GetAffinityMask())>()
} -> KPriorityQueueAffinityMask;
{ t.GetActiveCore() } -> Common::ConvertibleTo<s32>;
{ t.GetPriority() } -> Common::ConvertibleTo<s32>;
{ t.IsDummyThread() } -> Common::ConvertibleTo<bool>;
};
{ t.GetActiveCore() } -> Common::ConvertibleTo<s32>;
{ t.GetPriority() } -> Common::ConvertibleTo<s32>;
{ t.IsDummyThread() } -> Common::ConvertibleTo<bool>;
};
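The reformatted requires-expressions above mix two kinds of compound requirement: `{ expr };` only demands that the expression is well-formed, while `{ expr } -> Constraint;` additionally constrains the expression's type. A standalone sketch with toy names:
#include <concepts>
template <typename T>
concept Counter = requires(T& t) {
    { t.Increment() };                       // must merely compile
    { t.Get() } -> std::convertible_to<int>; // must also yield an int-like value
};
struct TickCounter {
    int value{};
    void Increment() { ++value; }
    int Get() const { return value; }
};
static_assert(Counter<TickCounter>);
struct NotACounter {};
static_assert(!Counter<NotACounter>);
int main() {
    TickCounter c;
    c.Increment();
    return c.Get() == 1 ? 0 : 1;
}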
template <typename Member, size_t NumCores_, int LowestPriority, int HighestPriority>
requires KPriorityQueueMember<Member>
requires KPriorityQueueMember<Member>
class KPriorityQueue {
public:
using AffinityMaskType = std::remove_cv_t<

View File

@ -417,9 +417,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
}
void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
AllocateMainThreadStack(stack_size);
ASSERT(AllocateMainThreadStack(stack_size) == ResultSuccess);
resource_limit->Reserve(LimitableResource::ThreadCountMax, 1);
resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, main_thread_stack_size);
const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
@ -675,20 +674,31 @@ void KProcess::ChangeState(State new_state) {
}
Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
ASSERT(stack_size);
// Ensure that we haven't already allocated stack.
ASSERT(main_thread_stack_size == 0);
// The kernel always ensures that the given stack size is page aligned.
main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
// Ensure that we're allocating a valid stack.
stack_size = Common::AlignUp(stack_size, PageSize);
// R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory);
R_UNLESS(stack_size + image_size >= image_size, ResultOutOfMemory);
const VAddr start{page_table.GetStackRegionStart()};
const std::size_t size{page_table.GetStackRegionEnd() - start};
// Place a tentative reservation of memory for our new stack.
KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax,
stack_size);
R_UNLESS(mem_reservation.Succeeded(), ResultLimitReached);
CASCADE_RESULT(main_thread_stack_top,
page_table.AllocateAndMapMemory(
main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
KMemoryState::Stack, KMemoryPermission::UserReadWrite));
// Allocate and map our stack.
if (stack_size) {
KProcessAddress stack_bottom;
R_TRY(page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize,
KMemoryState::Stack, KMemoryPermission::UserReadWrite));
main_thread_stack_top += main_thread_stack_size;
main_thread_stack_top = stack_bottom + stack_size;
main_thread_stack_size = stack_size;
}
// We succeeded! Commit our memory reservation.
mem_reservation.Commit();
R_SUCCEED();
}
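KScopedResourceReservation follows a tentative-reservation idiom: charge the resource up front, let scope unwinding release it automatically if any later step fails, and make it permanent with Commit() only on the success path. A standalone sketch with a hypothetical pool in place of the kernel's resource limit:
#include <cstdio>
class ScopedReservation {
public:
    ScopedReservation(long& pool, long amount) : m_pool(pool), m_amount(amount) {
        if (m_pool >= m_amount) {
            m_pool -= m_amount; // tentatively take the resource
            m_ok = true;
        }
    }
    ~ScopedReservation() {
        if (m_ok && !m_committed) {
            m_pool += m_amount; // roll back if never committed
        }
    }
    bool Succeeded() const { return m_ok; }
    void Commit() { m_committed = true; }

private:
    long& m_pool;
    long m_amount;
    bool m_ok{};
    bool m_committed{};
};
int main() {
    long physical_memory_pool = 0x4000;
    ScopedReservation reservation(physical_memory_pool, 0x2000);
    if (!reservation.Succeeded()) {
        return 1; // the kernel path returns ResultLimitReached here
    }
    // ... map the stack; an early return here would release the reservation ...
    reservation.Commit(); // success: the charge becomes permanent
    std::printf("pool left: %ld\n", physical_memory_pool);
}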

View File

@ -9,13 +9,14 @@
namespace Kernel {
template <typename T>
concept KLockable = !std::is_reference_v<T> && requires(T & t) {
{ t.Lock() } -> std::same_as<void>;
{ t.Unlock() } -> std::same_as<void>;
};
concept KLockable = !
std::is_reference_v<T>&& requires(T& t) {
{ t.Lock() } -> std::same_as<void>;
{ t.Unlock() } -> std::same_as<void>;
};
template <typename T>
requires KLockable<T>
requires KLockable<T>
class [[nodiscard]] KScopedLock {
public:
explicit KScopedLock(T* l) : lock_ptr(l) {

View File

@ -94,15 +94,15 @@ Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t m
R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission);
}
return target_process.PageTable().MapPages(address, *page_group, KMemoryState::Shared,
ConvertToKMemoryPermission(map_perm));
return target_process.PageTable().MapPageGroup(address, *page_group, KMemoryState::Shared,
ConvertToKMemoryPermission(map_perm));
}
Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
// Validate the size.
R_UNLESS(size == unmap_size, ResultInvalidSize);
return target_process.PageTable().UnmapPages(address, *page_group, KMemoryState::Shared);
return target_process.PageTable().UnmapPageGroup(address, *page_group, KMemoryState::Shared);
}
} // namespace Kernel

View File

@ -330,7 +330,7 @@ void KThread::Finalize() {
KThread* const waiter = std::addressof(*it);
// The thread shouldn't be a kernel waiter.
ASSERT(!IsKernelAddressKey(waiter->GetAddressKey()));
ASSERT(!waiter->GetAddressKeyIsKernel());
// Clear the lock owner.
waiter->SetLockOwner(nullptr);
@ -763,19 +763,6 @@ void KThread::Continue() {
KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
void KThread::WaitUntilSuspended() {
// Make sure we have a suspend requested.
ASSERT(IsSuspendRequested());
// Loop until the thread is not executing on any core.
for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
KThread* core_thread{};
do {
core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
}
Result KThread::SetActivity(Svc::ThreadActivity activity) {
// Lock ourselves.
KScopedLightLock lk(activity_pause_lock);
@ -897,7 +884,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
}
// Keep track of how many kernel waiters we have.
if (IsKernelAddressKey(thread->GetAddressKey())) {
if (thread->GetAddressKeyIsKernel()) {
ASSERT((num_kernel_waiters++) >= 0);
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
@ -911,7 +898,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Keep track of how many kernel waiters we have.
if (IsKernelAddressKey(thread->GetAddressKey())) {
if (thread->GetAddressKeyIsKernel()) {
ASSERT((num_kernel_waiters--) > 0);
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
@ -987,7 +974,7 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
KThread* thread = std::addressof(*it);
// Keep track of how many kernel waiters we have.
if (IsKernelAddressKey(thread->GetAddressKey())) {
if (thread->GetAddressKeyIsKernel()) {
ASSERT((num_kernel_waiters--) > 0);
KScheduler::SetSchedulerUpdateNeeded(kernel);
}

View File

@ -214,8 +214,6 @@ public:
void Continue();
void WaitUntilSuspended();
constexpr void SetSyncedIndex(s32 index) {
synced_index = index;
}
@ -607,13 +605,30 @@ public:
return address_key_value;
}
void SetAddressKey(VAddr key) {
address_key = key;
[[nodiscard]] bool GetAddressKeyIsKernel() const {
return address_key_is_kernel;
}
void SetAddressKey(VAddr key, u32 val) {
//! NB: intentional deviation from official kernel.
//
// Separate SetAddressKey into user and kernel versions
// to cope with arbitrary host pointers making their way
// into things.
void SetUserAddressKey(VAddr key) {
address_key = key;
address_key_is_kernel = false;
}
void SetUserAddressKey(VAddr key, u32 val) {
address_key = key;
address_key_value = val;
address_key_is_kernel = false;
}
void SetKernelAddressKey(VAddr key) {
address_key = key;
address_key_is_kernel = true;
}
void ClearWaitQueue() {
@ -662,7 +677,7 @@ private:
union SyncObjectBuffer {
std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
std::array<Handle,
Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
Svc::ArgumentHandleCountMax * (sizeof(KSynchronizationObject*) / sizeof(Handle))>
handles;
constexpr SyncObjectBuffer() {}
};
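The handle array above is sized by the pointer-to-handle ratio so both union members span exactly the same bytes, letting one buffer hold raw handles before they are resolved to object pointers. A standalone sketch of the sizing invariant (0x40 stands in for Svc::ArgumentHandleCountMax):
#include <array>
#include <cstddef>
#include <cstdint>
using Handle = uint32_t;
struct Obj; // stand-in for KSynchronizationObject
constexpr std::size_t MaxHandles = 0x40;
union SyncBuffer {
    std::array<Obj*, MaxHandles> objects;
    std::array<Handle, MaxHandles * (sizeof(Obj*) / sizeof(Handle))> handles;
};
static_assert(sizeof(SyncBuffer::objects) == sizeof(SyncBuffer::handles));
int main() {}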
@ -683,10 +698,8 @@ private:
};
template <typename T>
requires(
std::same_as<T, KThread> ||
std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
const KThread& rhs) {
requires(std::same_as<T, KThread> || std::same_as<T, RedBlackKeyType>)
static constexpr int Compare(const T& lhs, const KThread& rhs) {
const u64 l_key = lhs.GetConditionVariableKey();
const u64 r_key = rhs.GetConditionVariableKey();
@ -772,6 +785,7 @@ private:
bool debug_attached{};
s8 priority_inheritance_count{};
bool resource_limit_release_hint{};
bool address_key_is_kernel{};
StackParameters stack_parameters{};
Common::SpinLock context_guard{};

View File

@ -70,10 +70,8 @@ public:
}
template <typename T>
requires(std::same_as<T, KThreadLocalPage> ||
std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
const KThreadLocalPage&
rhs) {
requires(std::same_as<T, KThreadLocalPage> || std::same_as<T, RedBlackKeyType>)
static constexpr int Compare(const T& lhs, const KThreadLocalPage& rhs) {
const VAddr lval = GetRedBlackKey(lhs);
const VAddr rval = GetRedBlackKey(rhs);

View File

@ -1198,27 +1198,34 @@ void KernelCore::Suspend(bool suspended) {
const bool should_suspend{exception_exited || suspended};
const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
std::vector<KScopedAutoObject<KThread>> process_threads;
{
KScopedSchedulerLock sl{*this};
if (auto* process = CurrentProcess(); process != nullptr) {
process->SetActivity(activity);
if (!should_suspend) {
// Runnable now; no need to wait.
return;
}
for (auto* thread : process->GetThreadList()) {
process_threads.emplace_back(thread);
}
}
//! This refers to the application process, not the current process.
KScopedAutoObject<KProcess> process = CurrentProcess();
if (process.IsNull()) {
return;
}
// Wait for execution to stop.
for (auto& thread : process_threads) {
thread->WaitUntilSuspended();
// Set the new activity.
process->SetActivity(activity);
// Wait for process execution to stop.
bool must_wait{should_suspend};
// KernelCore::Suspend must be called from locked context, or we
// could race another call to SetActivity, interfering with waiting.
while (must_wait) {
KScopedSchedulerLock sl{*this};
// Assume that all threads have finished running.
must_wait = false;
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
if (Scheduler(i).GetSchedulerCurrentThread()->GetOwnerProcess() ==
process.GetPointerUnsafe()) {
// A thread has not finished running yet.
// Continue waiting.
must_wait = true;
}
}
}
}

View File

@ -35,6 +35,7 @@ class GlobalSchedulerContext;
class KAutoObjectWithListContainer;
class KClientSession;
class KDebug;
class KDeviceAddressSpace;
class KDynamicPageManager;
class KEvent;
class KEventInfo;
@ -359,6 +360,8 @@ public:
return slab_heap_container->transfer_memory;
} else if constexpr (std::is_same_v<T, KCodeMemory>) {
return slab_heap_container->code_memory;
} else if constexpr (std::is_same_v<T, KDeviceAddressSpace>) {
return slab_heap_container->device_address_space;
} else if constexpr (std::is_same_v<T, KPageBuffer>) {
return slab_heap_container->page_buffer;
} else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
@ -431,6 +434,7 @@ private:
KSlabHeap<KThread> thread;
KSlabHeap<KTransferMemory> transfer_memory;
KSlabHeap<KCodeMemory> code_memory;
KSlabHeap<KDeviceAddressSpace> device_address_space;
KSlabHeap<KPageBuffer> page_buffer;
KSlabHeap<KThreadLocalPage> thread_local_page;
KSlabHeap<KSessionRequest> session_request;

File diff suppressed because it is too large

View File

@ -4,6 +4,8 @@
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
namespace Core {
class System;
@ -13,4 +15,158 @@ namespace Kernel::Svc {
void Call(Core::System& system, u32 immediate);
Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size);
Result SetMemoryPermission(Core::System& system, VAddr address, u64 size, MemoryPermission perm);
Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attr);
Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size);
Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size);
Result QueryMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address,
VAddr query_address);
void ExitProcess(Core::System& system);
Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
VAddr stack_bottom, u32 priority, s32 core_id);
Result StartThread(Core::System& system, Handle thread_handle);
void ExitThread(Core::System& system);
void SleepThread(Core::System& system, s64 nanoseconds);
Result GetThreadPriority(Core::System& system, u32* out_priority, Handle handle);
Result SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority);
Result GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
u64* out_affinity_mask);
Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
u64 affinity_mask);
u32 GetCurrentProcessorNumber(Core::System& system);
Result SignalEvent(Core::System& system, Handle event_handle);
Result ClearEvent(Core::System& system, Handle event_handle);
Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size,
MemoryPermission map_perm);
Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size);
Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
MemoryPermission map_perm);
Result CloseHandle(Core::System& system, Handle handle);
Result ResetSignal(Core::System& system, Handle handle);
Result WaitSynchronization(Core::System& system, s32* index, VAddr handles_address, s32 num_handles,
s64 nano_seconds);
Result CancelSynchronization(Core::System& system, Handle handle);
Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag);
Result ArbitrateUnlock(Core::System& system, VAddr address);
Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag,
s64 timeout_ns);
void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count);
u64 GetSystemTick(Core::System& system);
Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address);
Result SendSyncRequest(Core::System& system, Handle handle);
Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle);
Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle);
void Break(Core::System& system, u32 reason, u64 info1, u64 info2);
void OutputDebugString(Core::System& system, VAddr address, u64 len);
Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle, u64 info_sub_id);
Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size);
Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size);
Result GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
Handle resource_limit_handle, LimitableResource which);
Result GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
Handle resource_limit_handle, LimitableResource which);
Result SetThreadActivity(Core::System& system, Handle thread_handle,
ThreadActivity thread_activity);
Result GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle);
Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value,
s64 timeout_ns);
Result SignalToAddress(Core::System& system, VAddr address, SignalType signal_type, s32 value,
s32 count);
void SynchronizePreemptionState(Core::System& system);
void KernelDebug(Core::System& system, u32 kernel_debug_type, u64 param1, u64 param2, u64 param3);
void ChangeKernelTraceState(Core::System& system, u32 trace_state);
Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, u32 is_light,
u64 name);
Result ReplyAndReceive(Core::System& system, s32* out_index, Handle* handles, s32 num_handles,
Handle reply_target, s64 timeout_ns);
Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read);
Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size);
Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
VAddr address, size_t size, MemoryPermission perm);
Result GetProcessList(Core::System& system, u32* out_num_processes, VAddr out_process_ids,
u32 out_process_ids_size);
Result GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
u32 out_thread_ids_size, Handle debug_handle);
Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, VAddr address,
u64 size, MemoryPermission perm);
Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
VAddr src_address, u64 size);
Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
VAddr src_address, u64 size);
Result QueryProcessMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address,
Handle process_handle, VAddr address);
Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
u64 src_address, u64 size);
Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
u64 src_address, u64 size);
Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type);
Result CreateResourceLimit(Core::System& system, Handle* out_handle);
Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
LimitableResource which, u64 limit_value);
//
Result SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size);
Result SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask, u32 attr);
Result MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size);
Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size);
Result QueryMemory32(Core::System& system, u32 memory_info_address, u32 page_info_address,
u32 query_address);
void ExitProcess32(Core::System& system);
Result CreateThread32(Core::System& system, Handle* out_handle, u32 priority, u32 entry_point,
u32 arg, u32 stack_top, s32 processor_id);
Result StartThread32(Core::System& system, Handle thread_handle);
void ExitThread32(Core::System& system);
void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high);
Result GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle);
Result SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority);
Result GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
u32* out_affinity_mask_low, u32* out_affinity_mask_high);
Result SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
u32 affinity_mask_low, u32 affinity_mask_high);
u32 GetCurrentProcessorNumber32(Core::System& system);
Result SignalEvent32(Core::System& system, Handle event_handle);
Result ClearEvent32(Core::System& system, Handle event_handle);
Result MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, u32 size,
MemoryPermission map_perm);
Result UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, u32 size);
Result CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
MemoryPermission map_perm);
Result CloseHandle32(Core::System& system, Handle handle);
Result ResetSignal32(Core::System& system, Handle handle);
Result WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
s32 num_handles, u32 timeout_high, s32* index);
Result CancelSynchronization32(Core::System& system, Handle handle);
Result ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address, u32 tag);
Result ArbitrateUnlock32(Core::System& system, u32 address);
Result WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
u32 timeout_ns_low, u32 timeout_ns_high);
void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count);
void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high);
Result ConnectToNamedPort32(Core::System& system, Handle* out_handle, u32 port_name_address);
Result SendSyncRequest32(Core::System& system, Handle handle);
Result GetProcessId32(Core::System& system, u32* out_process_id_low, u32* out_process_id_high,
Handle handle);
Result GetThreadId32(Core::System& system, u32* out_thread_id_low, u32* out_thread_id_high,
Handle thread_handle);
void Break32(Core::System& system, u32 reason, u32 info1, u32 info2);
void OutputDebugString32(Core::System& system, u32 address, u32 len);
Result GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
u32 info_id, u32 handle, u32 sub_id_high);
Result MapPhysicalMemory32(Core::System& system, u32 addr, u32 size);
Result UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size);
Result SetThreadActivity32(Core::System& system, Handle thread_handle,
ThreadActivity thread_activity);
Result GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle);
Result WaitForAddress32(Core::System& system, u32 address, ArbitrationType arb_type, s32 value,
u32 timeout_ns_low, u32 timeout_ns_high);
Result SignalToAddress32(Core::System& system, u32 address, SignalType signal_type, s32 value,
s32 count);
Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read);
Result CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size);
Result ControlCodeMemory32(Core::System& system, Handle code_memory_handle, u32 operation,
u64 address, u64 size, MemoryPermission perm);
Result FlushProcessDataCache32(Core::System& system, Handle process_handle, u64 address, u64 size);
} // namespace Kernel::Svc

View File

@ -0,0 +1,44 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel::Svc {
/// Sets the thread activity
Result SetThreadActivity(Core::System& system, Handle thread_handle,
ThreadActivity thread_activity) {
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
thread_activity);
// Validate the activity.
constexpr auto IsValidThreadActivity = [](ThreadActivity activity) {
return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused;
};
R_UNLESS(IsValidThreadActivity(thread_activity), ResultInvalidEnumValue);
// Get the thread from its handle.
KScopedAutoObject thread =
system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
// Check that the activity is being set on a non-current thread for the current process.
R_UNLESS(thread->GetOwnerProcess() == system.Kernel().CurrentProcess(), ResultInvalidHandle);
R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(system.Kernel()), ResultBusy);
// Set the activity.
R_TRY(thread->SetActivity(thread_activity));
return ResultSuccess;
}
Result SetThreadActivity32(Core::System& system, Handle thread_handle,
ThreadActivity thread_activity) {
return SetThreadActivity(system, thread_handle, thread_activity);
}
} // namespace Kernel::Svc

View File

@ -0,0 +1,113 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
namespace Kernel::Svc {
namespace {
constexpr bool IsValidSignalType(Svc::SignalType type) {
switch (type) {
case Svc::SignalType::Signal:
case Svc::SignalType::SignalAndIncrementIfEqual:
case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
return true;
default:
return false;
}
}
constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
switch (type) {
case Svc::ArbitrationType::WaitIfLessThan:
case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
case Svc::ArbitrationType::WaitIfEqual:
return true;
default:
return false;
}
}
} // namespace
// Wait for an address (via Address Arbiter)
Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value,
s64 timeout_ns) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
address, arb_type, value, timeout_ns);
// Validate input.
if (IsKernelAddress(address)) {
LOG_ERROR(Kernel_SVC, "Attempting to wait on kernel address (address={:08X})", address);
return ResultInvalidCurrentMemory;
}
if (!Common::IsAligned(address, sizeof(s32))) {
LOG_ERROR(Kernel_SVC, "Wait address must be 4 byte aligned (address={:08X})", address);
return ResultInvalidAddress;
}
if (!IsValidArbitrationType(arb_type)) {
LOG_ERROR(Kernel_SVC, "Invalid arbitration type specified (type={})", arb_type);
return ResultInvalidEnumValue;
}
// Convert timeout from nanoseconds to ticks.
s64 timeout{};
if (timeout_ns > 0) {
const s64 offset_tick(timeout_ns);
if (offset_tick > 0) {
timeout = offset_tick + 2;
if (timeout <= 0) {
timeout = std::numeric_limits<s64>::max();
}
} else {
timeout = std::numeric_limits<s64>::max();
}
} else {
timeout = timeout_ns;
}
return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
}
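The conversion above gives positive timeouts a two-tick margin and saturates to the maximum s64 instead of wrapping, while zero (poll once) and negative (wait forever) values pass through untouched. A standalone sketch that performs the overflow check before adding (the kernel code checks after):
#include <cstdio>
#include <limits>
long long NormalizeTimeout(long long timeout_ns) {
    if (timeout_ns <= 0) {
        return timeout_ns; // 0 = poll, negative = wait forever
    }
    // Add a small tick margin, saturating rather than overflowing.
    if (timeout_ns > std::numeric_limits<long long>::max() - 2) {
        return std::numeric_limits<long long>::max();
    }
    return timeout_ns + 2;
}
int main() {
    std::printf("%lld\n", NormalizeTimeout(100)); // 102
    std::printf("%lld\n", NormalizeTimeout(-1));  // -1, wait forever
    std::printf("%lld\n", NormalizeTimeout(std::numeric_limits<long long>::max())); // saturated
}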
Result WaitForAddress32(Core::System& system, u32 address, ArbitrationType arb_type, s32 value,
u32 timeout_ns_low, u32 timeout_ns_high) {
const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
return WaitForAddress(system, address, arb_type, value, timeout);
}
// Signals to an address (via Address Arbiter)
Result SignalToAddress(Core::System& system, VAddr address, SignalType signal_type, s32 value,
s32 count) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
address, signal_type, value, count);
// Validate input.
if (IsKernelAddress(address)) {
LOG_ERROR(Kernel_SVC, "Attempting to signal to a kernel address (address={:08X})", address);
return ResultInvalidCurrentMemory;
}
if (!Common::IsAligned(address, sizeof(s32))) {
LOG_ERROR(Kernel_SVC, "Signaled address must be 4 byte aligned (address={:08X})", address);
return ResultInvalidAddress;
}
if (!IsValidSignalType(signal_type)) {
LOG_ERROR(Kernel_SVC, "Invalid signal type specified (type={})", signal_type);
return ResultInvalidEnumValue;
}
return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value,
count);
}
Result SignalToAddress32(Core::System& system, u32 address, SignalType signal_type, s32 value,
s32 count) {
return SignalToAddress(system, address, signal_type, value, count);
}
} // namespace Kernel::Svc

View File

@ -0,0 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {} // namespace Kernel::Svc

View File

@ -0,0 +1,31 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
namespace Kernel::Svc {
Result FlushProcessDataCache32(Core::System& system, Handle process_handle, u64 address, u64 size) {
// Validate address/size.
R_UNLESS(size > 0, ResultInvalidSize);
R_UNLESS(address == static_cast<uintptr_t>(address), ResultInvalidCurrentMemory);
R_UNLESS(size == static_cast<size_t>(size), ResultInvalidCurrentMemory);
// Get the process from its handle.
KScopedAutoObject process =
system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KProcess>(process_handle);
R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
// Verify the region is within range.
auto& page_table = process->PageTable();
R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
// Perform the operation.
R_RETURN(system.Memory().FlushDataCache(*process, address, size));
}
} // namespace Kernel::Svc
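
The R_UNLESS checks above use a cast-and-compare round trip to reject values that would truncate in a 32-bit process. A minimal sketch of that idiom, assuming the narrow type is unsigned:

#include <cstdint>

// A 64-bit value fits the narrower unsigned type iff casting down and back
// reproduces it; this mirrors the address/size checks above.
template <typename Narrow>
constexpr bool FitsIn(uint64_t value) {
    return value == static_cast<uint64_t>(static_cast<Narrow>(value));
}

static_assert(FitsIn<uint32_t>(0xFFFF'FFFFull));
static_assert(!FitsIn<uint32_t>(0x1'0000'0000ull));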

View File

@ -0,0 +1,154 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {
namespace {
constexpr bool IsValidMapCodeMemoryPermission(MemoryPermission perm) {
return perm == MemoryPermission::ReadWrite;
}
constexpr bool IsValidMapToOwnerCodeMemoryPermission(MemoryPermission perm) {
return perm == MemoryPermission::Read || perm == MemoryPermission::ReadExecute;
}
constexpr bool IsValidUnmapCodeMemoryPermission(MemoryPermission perm) {
return perm == MemoryPermission::None;
}
constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(MemoryPermission perm) {
return perm == MemoryPermission::None;
}
} // namespace
Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
// Get kernel instance.
auto& kernel = system.Kernel();
// Validate address / size.
R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
R_UNLESS(size > 0, ResultInvalidSize);
R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
// Create the code memory.
KCodeMemory* code_mem = KCodeMemory::Create(kernel);
R_UNLESS(code_mem != nullptr, ResultOutOfResource);
// Verify that the region is in range.
R_UNLESS(system.CurrentProcess()->PageTable().Contains(address, size),
ResultInvalidCurrentMemory);
// Initialize the code memory.
R_TRY(code_mem->Initialize(system.DeviceMemory(), address, size));
// Register the code memory.
KCodeMemory::Register(kernel, code_mem);
// Add the code memory to the handle table.
R_TRY(system.CurrentProcess()->GetHandleTable().Add(out, code_mem));
code_mem->Close();
return ResultSuccess;
}
Result CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
return CreateCodeMemory(system, out, address, size);
}
Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
VAddr address, size_t size, MemoryPermission perm) {
LOG_TRACE(Kernel_SVC,
"called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, "
"permission=0x{:X}",
code_memory_handle, operation, address, size, perm);
// Validate the address / size.
R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
R_UNLESS(size > 0, ResultInvalidSize);
R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
// Get the code memory from its handle.
KScopedAutoObject code_mem =
system.CurrentProcess()->GetHandleTable().GetObject<KCodeMemory>(code_memory_handle);
R_UNLESS(code_mem.IsNotNull(), ResultInvalidHandle);
// NOTE: Here, Atmosphere extends the SVC to allow code memory operations on one's own process.
// This enables homebrew usage of these SVCs for JIT.
// Perform the operation.
switch (static_cast<CodeMemoryOperation>(operation)) {
case CodeMemoryOperation::Map: {
// Check that the region is in range.
R_UNLESS(
system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
ResultInvalidMemoryRegion);
// Check the memory permission.
R_UNLESS(IsValidMapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
// Map the memory.
R_TRY(code_mem->Map(address, size));
} break;
case CodeMemoryOperation::Unmap: {
// Check that the region is in range.
R_UNLESS(
system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
ResultInvalidMemoryRegion);
// Check the memory permission.
R_UNLESS(IsValidUnmapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
// Unmap the memory.
R_TRY(code_mem->Unmap(address, size));
} break;
case CodeMemoryOperation::MapToOwner: {
// Check that the region is in range.
R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
KMemoryState::GeneratedCode),
ResultInvalidMemoryRegion);
// Check the memory permission.
R_UNLESS(IsValidMapToOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
// Map the memory to its owner.
R_TRY(code_mem->MapToOwner(address, size, perm));
} break;
case CodeMemoryOperation::UnmapFromOwner: {
// Check that the region is in range.
R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
KMemoryState::GeneratedCode),
ResultInvalidMemoryRegion);
// Check the memory permission.
R_UNLESS(IsValidUnmapFromOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
// Unmap the memory from its owner.
R_TRY(code_mem->UnmapFromOwner(address, size));
} break;
default:
return ResultInvalidEnumValue;
}
return ResultSuccess;
}
Result ControlCodeMemory32(Core::System& system, Handle code_memory_handle, u32 operation,
u64 address, u64 size, MemoryPermission perm) {
return ControlCodeMemory(system, code_memory_handle, operation, address, size, perm);
}
} // namespace Kernel::Svc
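
As the NOTE above says, extending these SVCs to one's own process enables homebrew JITs: code is emitted through a read-write alias and executed through a read-execute alias of the same backing pages. A hypothetical caller-side outline follows; the svc* wrappers and enum values are illustrative stand-ins declared only to keep the sketch self-contained, not yuzu's API:

#include <cstddef>
#include <cstdint>

// Illustrative stand-ins so the sketch compiles on its own; not yuzu's API.
using Handle = uint32_t;
enum class Op : uint32_t { Map, Unmap, MapToOwner, UnmapFromOwner };
enum class Perm : uint32_t { None = 0, Read = 1, ReadWrite = 3, ReadExecute = 5 };
extern "C" uint32_t svcCreateCodeMemory(Handle* out, void* src, std::size_t size);
extern "C" uint32_t svcControlCodeMemory(Handle h, Op op, uint64_t addr, std::size_t size,
                                         Perm perm);

void JitExample(void* backing, uint64_t rw_addr, uint64_t rx_addr, std::size_t size) {
    Handle h{};
    svcCreateCodeMemory(&h, backing, size);                                    // donate backing pages
    svcControlCodeMemory(h, Op::Map, rw_addr, size, Perm::ReadWrite);          // writable alias
    svcControlCodeMemory(h, Op::MapToOwner, rx_addr, size, Perm::ReadExecute); // executable alias
    // ... emit code through rw_addr, then execute it at rx_addr ...
    svcControlCodeMemory(h, Op::UnmapFromOwner, rx_addr, size, Perm::None);
    svcControlCodeMemory(h, Op::Unmap, rw_addr, size, Perm::None);
}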

View File

@ -0,0 +1,69 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel::Svc {
/// Wait process wide key atomic
Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag,
s64 timeout_ns) {
LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
cv_key, tag, timeout_ns);
// Validate input.
if (IsKernelAddress(address)) {
LOG_ERROR(Kernel_SVC, "Attempted to wait on kernel address (address={:08X})", address);
return ResultInvalidCurrentMemory;
}
if (!Common::IsAligned(address, sizeof(s32))) {
LOG_ERROR(Kernel_SVC, "Address must be 4 byte aligned (address={:08X})", address);
return ResultInvalidAddress;
}
// Convert timeout from nanoseconds to ticks.
s64 timeout{};
if (timeout_ns > 0) {
const s64 offset_tick(timeout_ns);
if (offset_tick > 0) {
timeout = offset_tick + 2;
if (timeout <= 0) {
timeout = std::numeric_limits<s64>::max();
}
} else {
timeout = std::numeric_limits<s64>::max();
}
} else {
timeout = timeout_ns;
}
// Wait on the condition variable.
return system.Kernel().CurrentProcess()->WaitConditionVariable(
address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
}
Result WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
u32 timeout_ns_low, u32 timeout_ns_high) {
const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
}
/// Signal process wide key
void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
// Signal the condition variable.
return system.Kernel().CurrentProcess()->SignalConditionVariable(
Common::AlignDown(cv_key, sizeof(u32)), count);
}
void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) {
SignalProcessWideKey(system, cv_key, count);
}
} // namespace Kernel::Svc
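
Both functions above round cv_key down to a u32 boundary before use, so any address within the same word maps to the same condition-variable key. A minimal sketch of that AlignDown call, assuming the usual power-of-two semantics:

#include <cstdint>

// AlignDown as used on cv_key above; align must be a power of two.
constexpr uint64_t AlignDown(uint64_t value, uint64_t align) {
    return value & ~(align - 1);
}

static_assert(AlignDown(0x1003, 4) == 0x1000);
static_assert(AlignDown(0x1000, 4) == 0x1000);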

View File

@ -0,0 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {} // namespace Kernel::Svc

View File

@ -0,0 +1,25 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/hle/kernel/svc.h"
#include "core/memory.h"
namespace Kernel::Svc {
/// Used to output a message on a debug hardware unit - does nothing on a retail unit
void OutputDebugString(Core::System& system, VAddr address, u64 len) {
if (len == 0) {
return;
}
std::string str(len, '\0');
system.Memory().ReadBlock(address, str.data(), str.size());
LOG_DEBUG(Debug_Emulated, "{}", str);
}
void OutputDebugString32(Core::System& system, u32 address, u32 len) {
OutputDebugString(system, address, len);
}
} // namespace Kernel::Svc

View File

@ -0,0 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {} // namespace Kernel::Svc

View File

@ -0,0 +1,111 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {
Result SignalEvent(Core::System& system, Handle event_handle) {
LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
// Get the current handle table.
const KHandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
// Get the event.
KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
R_UNLESS(event.IsNotNull(), ResultInvalidHandle);
return event->Signal();
}
Result SignalEvent32(Core::System& system, Handle event_handle) {
return SignalEvent(system, event_handle);
}
Result ClearEvent(Core::System& system, Handle event_handle) {
LOG_TRACE(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
// Get the current handle table.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
// Try to clear the writable event.
{
KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
if (event.IsNotNull()) {
return event->Clear();
}
}
// Try to clear the readable event.
{
KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle);
if (readable_event.IsNotNull()) {
return readable_event->Clear();
}
}
LOG_ERROR(Kernel_SVC, "Event handle does not exist, event_handle=0x{:08X}", event_handle);
return ResultInvalidHandle;
}
Result ClearEvent32(Core::System& system, Handle event_handle) {
return ClearEvent(system, event_handle);
}
Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
LOG_DEBUG(Kernel_SVC, "called");
// Get the kernel reference and handle table.
auto& kernel = system.Kernel();
auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
// Reserve a new event from the process resource limit
KScopedResourceReservation event_reservation(kernel.CurrentProcess(),
LimitableResource::EventCountMax);
R_UNLESS(event_reservation.Succeeded(), ResultLimitReached);
// Create a new event.
KEvent* event = KEvent::Create(kernel);
R_UNLESS(event != nullptr, ResultOutOfResource);
// Initialize the event.
event->Initialize(kernel.CurrentProcess());
// Commit the thread reservation.
event_reservation.Commit();
// Ensure that we clean up the event on function end, so that its only remaining references are the handle table's.
SCOPE_EXIT({
event->GetReadableEvent().Close();
event->Close();
});
// Register the event.
KEvent::Register(kernel, event);
// Add the event to the handle table.
R_TRY(handle_table.Add(out_write, event));
// Ensure that we maintain a clean handle state on exit.
auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); });
// Add the readable event to the handle table.
R_TRY(handle_table.Add(out_read, std::addressof(event->GetReadableEvent())));
// We succeeded.
handle_guard.Cancel();
return ResultSuccess;
}
Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
return CreateEvent(system, out_write, out_read);
}
} // namespace Kernel::Svc
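
CreateEvent leans on SCOPE_EXIT and a cancellable SCOPE_GUARD to keep the handle table consistent on every early-exit path. A generic sketch of that pattern in plain C++; yuzu's own macros in common/scope_exit.h are more elaborate:

#include <utility>

// Runs the cleanup functor at scope exit unless Cancel() was called first.
template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F func) : func_{std::move(func)} {}
    ~ScopeGuard() {
        if (active_) {
            func_();
        }
    }
    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;
    void Cancel() { active_ = false; }

private:
    F func_;
    bool active_ = true;
};

Usage mirrors CreateEvent: construct the guard right after the first fallible step, and call Cancel() only once every subsequent step has succeeded.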

View File

@ -0,0 +1,121 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/debugger/debugger.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_types.h"
#include "core/memory.h"
#include "core/reporter.h"
namespace Kernel::Svc {
/// Break program execution
void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
BreakReason break_reason =
static_cast<BreakReason>(reason & ~static_cast<u32>(BreakReason::NotificationOnlyFlag));
bool notification_only = (reason & static_cast<u32>(BreakReason::NotificationOnlyFlag)) != 0;
bool has_dumped_buffer{};
std::vector<u8> debug_buffer;
const auto handle_debug_buffer = [&](VAddr addr, u64 sz) {
if (sz == 0 || addr == 0 || has_dumped_buffer) {
return;
}
auto& memory = system.Memory();
// This is typically an error code, so assume that is the case
if (sz == sizeof(u32)) {
LOG_CRITICAL(Debug_Emulated, "debug_buffer_err_code={:X}", memory.Read32(addr));
} else {
// We don't know what's in here so we'll hexdump it
debug_buffer.resize(sz);
memory.ReadBlock(addr, debug_buffer.data(), sz);
std::string hexdump;
for (std::size_t i = 0; i < debug_buffer.size(); i++) {
hexdump += fmt::format("{:02X} ", debug_buffer[i]);
// Break the dump into rows of 16 bytes.
if (i % 16 == 15) {
hexdump += '\n';
}
}
LOG_CRITICAL(Debug_Emulated, "debug_buffer=\n{}", hexdump);
}
has_dumped_buffer = true;
};
switch (break_reason) {
case BreakReason::Panic:
LOG_CRITICAL(Debug_Emulated, "Userspace PANIC! info1=0x{:016X}, info2=0x{:016X}", info1,
info2);
handle_debug_buffer(info1, info2);
break;
case BreakReason::Assert:
LOG_CRITICAL(Debug_Emulated, "Userspace Assertion failed! info1=0x{:016X}, info2=0x{:016X}",
info1, info2);
handle_debug_buffer(info1, info2);
break;
case BreakReason::User:
LOG_WARNING(Debug_Emulated, "Userspace Break! 0x{:016X} with size 0x{:016X}", info1, info2);
handle_debug_buffer(info1, info2);
break;
case BreakReason::PreLoadDll:
LOG_INFO(Debug_Emulated,
"Userspace Attempting to load an NRO at 0x{:016X} with size 0x{:016X}", info1,
info2);
break;
case BreakReason::PostLoadDll:
LOG_INFO(Debug_Emulated, "Userspace Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1,
info2);
break;
case BreakReason::PreUnloadDll:
LOG_INFO(Debug_Emulated,
"Userspace Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}", info1,
info2);
break;
case BreakReason::PostUnloadDll:
LOG_INFO(Debug_Emulated, "Userspace Unloaded an NRO at 0x{:016X} with size 0x{:016X}",
info1, info2);
break;
case BreakReason::CppException:
LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered.");
break;
default:
LOG_WARNING(
Debug_Emulated,
"Signalling debugger, Unknown break reason {:#X}, info1=0x{:016X}, info2=0x{:016X}",
reason, info1, info2);
handle_debug_buffer(info1, info2);
break;
}
system.GetReporter().SaveSvcBreakReport(reason, notification_only, info1, info2,
has_dumped_buffer ? std::make_optional(debug_buffer)
: std::nullopt);
if (!notification_only) {
LOG_CRITICAL(
Debug_Emulated,
"Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
reason, info1, info2);
handle_debug_buffer(info1, info2);
auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const auto thread_processor_id = current_thread->GetActiveCore();
system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
}
if (system.DebuggerEnabled()) {
auto* thread = system.Kernel().GetCurrentEmuThread();
system.GetDebugger().NotifyThreadStopped(thread);
thread->RequestSuspend(Kernel::SuspendType::Debug);
}
}
void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
Break(system, reason, info1, info2);
}
} // namespace Kernel::Svc
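
Break splits its raw reason argument into a payload and a notification-only bit, as seen at the top of the function. A minimal sketch of that decomposition; the flag value follows the common Horizon convention but is an assumption here:

#include <cstdint>

// Assumed flag value, matching the usual Horizon convention.
constexpr uint32_t kNotificationOnlyFlag = 0x80000000;

constexpr uint32_t BreakReasonOf(uint32_t raw) {
    return raw & ~kNotificationOnlyFlag;
}
constexpr bool IsNotificationOnly(uint32_t raw) {
    return (raw & kNotificationOnlyFlag) != 0;
}

static_assert(BreakReasonOf(0x80000001) == 1);
static_assert(IsNotificationOnly(0x80000001));
static_assert(!IsNotificationOnly(0x00000001));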

View File

@ -0,0 +1,282 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {
/// Gets system/memory information for the current process
Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle, u64 info_sub_id) {
LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
info_sub_id, handle);
const auto info_id_type = static_cast<InfoType>(info_id);
switch (info_id_type) {
case InfoType::CoreMask:
case InfoType::PriorityMask:
case InfoType::AliasRegionAddress:
case InfoType::AliasRegionSize:
case InfoType::HeapRegionAddress:
case InfoType::HeapRegionSize:
case InfoType::AslrRegionAddress:
case InfoType::AslrRegionSize:
case InfoType::StackRegionAddress:
case InfoType::StackRegionSize:
case InfoType::TotalMemorySize:
case InfoType::UsedMemorySize:
case InfoType::SystemResourceSizeTotal:
case InfoType::SystemResourceSizeUsed:
case InfoType::ProgramId:
case InfoType::UserExceptionContextAddress:
case InfoType::TotalNonSystemMemorySize:
case InfoType::UsedNonSystemMemorySize:
case InfoType::IsApplication:
case InfoType::FreeThreadCount: {
if (info_sub_id != 0) {
LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
info_sub_id);
return ResultInvalidEnumValue;
}
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
if (process.IsNull()) {
LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}",
info_id, info_sub_id, handle);
return ResultInvalidHandle;
}
switch (info_id_type) {
case InfoType::CoreMask:
*result = process->GetCoreMask();
return ResultSuccess;
case InfoType::PriorityMask:
*result = process->GetPriorityMask();
return ResultSuccess;
case InfoType::AliasRegionAddress:
*result = process->PageTable().GetAliasRegionStart();
return ResultSuccess;
case InfoType::AliasRegionSize:
*result = process->PageTable().GetAliasRegionSize();
return ResultSuccess;
case InfoType::HeapRegionAddress:
*result = process->PageTable().GetHeapRegionStart();
return ResultSuccess;
case InfoType::HeapRegionSize:
*result = process->PageTable().GetHeapRegionSize();
return ResultSuccess;
case InfoType::AslrRegionAddress:
*result = process->PageTable().GetAliasCodeRegionStart();
return ResultSuccess;
case InfoType::AslrRegionSize:
*result = process->PageTable().GetAliasCodeRegionSize();
return ResultSuccess;
case InfoType::StackRegionAddress:
*result = process->PageTable().GetStackRegionStart();
return ResultSuccess;
case InfoType::StackRegionSize:
*result = process->PageTable().GetStackRegionSize();
return ResultSuccess;
case InfoType::TotalMemorySize:
*result = process->GetTotalPhysicalMemoryAvailable();
return ResultSuccess;
case InfoType::UsedMemorySize:
*result = process->GetTotalPhysicalMemoryUsed();
return ResultSuccess;
case InfoType::SystemResourceSizeTotal:
*result = process->GetSystemResourceSize();
return ResultSuccess;
case InfoType::SystemResourceSizeUsed:
LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
*result = process->GetSystemResourceUsage();
return ResultSuccess;
case InfoType::ProgramId:
*result = process->GetProgramID();
return ResultSuccess;
case InfoType::UserExceptionContextAddress:
*result = process->GetProcessLocalRegionAddress();
return ResultSuccess;
case InfoType::TotalNonSystemMemorySize:
*result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
return ResultSuccess;
case InfoType::UsedNonSystemMemorySize:
*result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
return ResultSuccess;
case InfoType::FreeThreadCount:
*result = process->GetFreeThreadCount();
return ResultSuccess;
default:
break;
}
LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
return ResultInvalidEnumValue;
}
case InfoType::DebuggerAttached:
*result = 0;
return ResultSuccess;
case InfoType::ResourceLimit: {
if (handle != 0) {
LOG_ERROR(Kernel, "Handle is non zero! handle={:08X}", handle);
return ResultInvalidHandle;
}
if (info_sub_id != 0) {
LOG_ERROR(Kernel, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
info_sub_id);
return ResultInvalidCombination;
}
KProcess* const current_process = system.Kernel().CurrentProcess();
KHandleTable& handle_table = current_process->GetHandleTable();
const auto resource_limit = current_process->GetResourceLimit();
if (!resource_limit) {
*result = Svc::InvalidHandle;
// Yes, the kernel considers this a successful operation.
return ResultSuccess;
}
Handle resource_handle{};
R_TRY(handle_table.Add(&resource_handle, resource_limit));
*result = resource_handle;
return ResultSuccess;
}
case InfoType::RandomEntropy:
if (handle != 0) {
LOG_ERROR(Kernel_SVC, "Process Handle is non zero, expected 0 result but got {:016X}",
handle);
return ResultInvalidHandle;
}
if (info_sub_id >= KProcess::RANDOM_ENTROPY_SIZE) {
LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}",
KProcess::RANDOM_ENTROPY_SIZE, info_sub_id);
return ResultInvalidCombination;
}
*result = system.Kernel().CurrentProcess()->GetRandomEntropy(info_sub_id);
return ResultSuccess;
case InfoType::InitialProcessIdRange:
LOG_WARNING(Kernel_SVC,
"(STUBBED) Attempted to query privileged process id bounds, returned 0");
*result = 0;
return ResultSuccess;
case InfoType::ThreadTickCount: {
constexpr u64 num_cpus = 4;
if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) {
LOG_ERROR(Kernel_SVC, "Core count is out of range, expected {} but got {}", num_cpus,
info_sub_id);
return ResultInvalidCombination;
}
KScopedAutoObject thread =
system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(
static_cast<Handle>(handle));
if (thread.IsNull()) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
static_cast<Handle>(handle));
return ResultInvalidHandle;
}
const auto& core_timing = system.CoreTiming();
const auto& scheduler = *system.Kernel().CurrentScheduler();
const auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const bool same_thread = current_thread == thread.GetPointerUnsafe();
const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTime();
u64 out_ticks = 0;
if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
const u64 thread_ticks = current_thread->GetCpuTime();
out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
} else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
}
*result = out_ticks;
return ResultSuccess;
}
case InfoType::IdleTickCount: {
// Verify the input handle is invalid.
R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
// Verify the requested core is valid.
const bool core_valid =
(info_sub_id == 0xFFFFFFFFFFFFFFFF) ||
(info_sub_id == static_cast<u64>(system.Kernel().CurrentPhysicalCoreIndex()));
R_UNLESS(core_valid, ResultInvalidCombination);
// Get the idle tick count.
*result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
return ResultSuccess;
}
case InfoType::MesosphereCurrentProcess: {
// Verify the input handle is invalid.
R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
// Verify the sub-type is valid.
R_UNLESS(info_sub_id == 0, ResultInvalidCombination);
// Get the handle table.
KProcess* current_process = system.Kernel().CurrentProcess();
KHandleTable& handle_table = current_process->GetHandleTable();
// Get a new handle for the current process.
Handle tmp;
R_TRY(handle_table.Add(&tmp, current_process));
// Set the output.
*result = tmp;
// We succeeded.
return ResultSuccess;
}
default:
LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
return ResultInvalidEnumValue;
}
}
Result GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
u32 info_id, u32 handle, u32 sub_id_high) {
const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)};
u64 res_value{};
const Result result{GetInfo(system, &res_value, info_id, handle, sub_id)};
*result_high = static_cast<u32>(res_value >> 32);
*result_low = static_cast<u32>(res_value & std::numeric_limits<u32>::max());
return result;
}
} // namespace Kernel::Svc
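
GetInfo32 returns the 64-bit value through two u32 out-parameters. A minimal sketch of the split and the inverse recombination a 32-bit caller would perform:

#include <cstdint>

constexpr uint32_t LowWord(uint64_t v) {
    return static_cast<uint32_t>(v);
}
constexpr uint32_t HighWord(uint64_t v) {
    return static_cast<uint32_t>(v >> 32);
}
constexpr uint64_t Combine(uint32_t lo, uint32_t hi) {
    return uint64_t{lo} | (uint64_t{hi} << 32);
}

static_assert(Combine(LowWord(0x1234'5678'9ABC'DEF0ull),
                      HighWord(0x1234'5678'9ABC'DEF0ull)) == 0x1234'5678'9ABC'DEF0ull);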

View File

@ -0,0 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {} // namespace Kernel::Svc

View File

@ -0,0 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {} // namespace Kernel::Svc

View File

@ -0,0 +1,89 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {
/// Makes a blocking IPC call to a service.
Result SendSyncRequest(Core::System& system, Handle handle) {
auto& kernel = system.Kernel();
// Get the client session from its handle.
KScopedAutoObject session =
kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
return session->SendSyncRequest();
}
Result SendSyncRequest32(Core::System& system, Handle handle) {
return SendSyncRequest(system, handle);
}
Result ReplyAndReceive(Core::System& system, s32* out_index, Handle* handles, s32 num_handles,
Handle reply_target, s64 timeout_ns) {
auto& kernel = system.Kernel();
auto& handle_table = GetCurrentThread(kernel).GetOwnerProcess()->GetHandleTable();
// Convert handle list to object table.
std::vector<KSynchronizationObject*> objs(num_handles);
R_UNLESS(
handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles, num_handles),
ResultInvalidHandle);
// Ensure handles are closed when we're done.
SCOPE_EXIT({
for (auto i = 0; i < num_handles; ++i) {
objs[i]->Close();
}
});
// Reply to the target, if one is specified.
if (reply_target != InvalidHandle) {
KScopedAutoObject session = handle_table.GetObject<KServerSession>(reply_target);
R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
// If we fail to reply, we want to set the output index to -1.
ON_RESULT_FAILURE {
*out_index = -1;
};
// Send the reply.
R_TRY(session->SendReply());
}
// Wait for a message.
while (true) {
// Wait for an object.
s32 index;
Result result = KSynchronizationObject::Wait(kernel, &index, objs.data(),
static_cast<s32>(objs.size()), timeout_ns);
if (result == ResultTimedOut) {
return result;
}
// Receive the request.
if (R_SUCCEEDED(result)) {
KServerSession* session = objs[index]->DynamicCast<KServerSession*>();
if (session != nullptr) {
result = session->ReceiveRequest();
if (result == ResultNotFound) {
continue;
}
}
}
*out_index = index;
return result;
}
}
} // namespace Kernel::Svc
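
ReplyAndReceive combines the reply and the next wait in one call, and the loop above retries when a woken session has no request yet (ResultNotFound). A hypothetical service loop built on that contract; the Handle/Result aliases and the svc wrapper are illustrative stubs, not yuzu's API:

#include <cstdint>

// Illustrative stubs so the sketch is self-contained; not yuzu's API.
using Handle = uint32_t;
using Result = uint32_t;
extern "C" Result svcReplyAndReceive(int32_t* out_index, const Handle* handles,
                                     int32_t num_handles, Handle reply_target,
                                     int64_t timeout_ns);

void ServeForever(const Handle* sessions, int32_t count) {
    Handle reply_target = 0;  // InvalidHandle: nothing to reply to yet
    for (;;) {
        int32_t index = -1;
        const Result rc = svcReplyAndReceive(&index, sessions, count, reply_target, -1);
        if (rc != 0) {
            reply_target = 0;  // e.g. the peer closed; drop the pending reply
            continue;
        }
        // ... parse and handle the request received on sessions[index] ...
        reply_target = sessions[index];  // reply on the next iteration
    }
}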

View File

@ -0,0 +1,19 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {
void KernelDebug([[maybe_unused]] Core::System& system, [[maybe_unused]] u32 kernel_debug_type,
[[maybe_unused]] u64 param1, [[maybe_unused]] u64 param2,
[[maybe_unused]] u64 param3) {
// Intentionally do nothing, as this does nothing in released kernel binaries.
}
void ChangeKernelTraceState([[maybe_unused]] Core::System& system,
[[maybe_unused]] u32 trace_state) {
// Intentionally do nothing, as this does nothing in released kernel binaries.
}
} // namespace Kernel::Svc

View File

@ -0,0 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {} // namespace Kernel::Svc

View File

@ -0,0 +1,57 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {
/// Attempts to lock a mutex
Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag) {
LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
thread_handle, address, tag);
// Validate the input address.
if (IsKernelAddress(address)) {
LOG_ERROR(Kernel_SVC, "Attempting to arbitrate a lock on a kernel address (address={:08X})",
address);
return ResultInvalidCurrentMemory;
}
if (!Common::IsAligned(address, sizeof(u32))) {
LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
return ResultInvalidAddress;
}
return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
}
Result ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address, u32 tag) {
return ArbitrateLock(system, thread_handle, address, tag);
}
/// Unlock a mutex
Result ArbitrateUnlock(Core::System& system, VAddr address) {
LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
// Validate the input address.
if (IsKernelAddress(address)) {
LOG_ERROR(Kernel_SVC,
"Attempting to arbitrate an unlock on a kernel address (address={:08X})",
address);
return ResultInvalidCurrentMemory;
}
if (!Common::IsAligned(address, sizeof(u32))) {
LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
return ResultInvalidAddress;
}
return system.Kernel().CurrentProcess()->SignalToAddress(address);
}
Result ArbitrateUnlock32(Core::System& system, u32 address) {
return ArbitrateUnlock(system, address);
}
} // namespace Kernel::Svc
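
These two SVCs back user-mode mutexes: the lock word holds the owner's thread handle, and a waiters bit asks the owner to arbitrate the unlock through the kernel. A hypothetical fast path; the mask value follows Horizon's convention but is an assumption here:

#include <atomic>
#include <cstdint>

constexpr uint32_t kWaitMask = 0x40000000;  // assumed "has waiters" bit

// Returns true if the fast path took the lock; otherwise the word now has
// kWaitMask set and the caller should fall back to svcArbitrateLock.
bool TryLockOrFlagWaiter(std::atomic<uint32_t>& word, uint32_t self_handle) {
    uint32_t expected = 0;  // 0 = unlocked
    if (word.compare_exchange_strong(expected, self_handle)) {
        return true;
    }
    word.fetch_or(kWaitMask);  // ask the owner to arbitrate the unlock
    return false;
}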

View File

@ -0,0 +1,189 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/svc.h"
namespace Kernel::Svc {
namespace {
constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
switch (perm) {
case MemoryPermission::None:
case MemoryPermission::Read:
case MemoryPermission::ReadWrite:
return true;
default:
return false;
}
}
// Checks if address + size is greater than the given address
// This can return false if the size causes an overflow of a 64-bit type
// or if the given size is zero.
constexpr bool IsValidAddressRange(VAddr address, u64 size) {
return address + size > address;
}
// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
u64 size) {
if (!Common::Is4KBAligned(dst_addr)) {
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(src_addr)) {
LOG_ERROR(Kernel_SVC, "Source address is not aligned to 4KB, 0x{:016X}", src_addr);
return ResultInvalidSize;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is 0");
return ResultInvalidSize;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
return ResultInvalidSize;
}
if (!IsValidAddressRange(dst_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
dst_addr, size);
return ResultInvalidCurrentMemory;
}
if (!IsValidAddressRange(src_addr, size)) {
LOG_ERROR(Kernel_SVC, "Source is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
src_addr, size);
return ResultInvalidCurrentMemory;
}
if (!manager.IsInsideAddressSpace(src_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
src_addr, size);
return ResultInvalidCurrentMemory;
}
if (manager.IsOutsideStackRegion(dst_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
dst_addr, size);
return ResultInvalidMemoryRegion;
}
if (manager.IsInsideHeapRegion(dst_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination does not fit within the heap region, addr=0x{:016X}, "
"size=0x{:016X}",
dst_addr, size);
return ResultInvalidMemoryRegion;
}
if (manager.IsInsideAliasRegion(dst_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination does not fit within the map region, addr=0x{:016X}, "
"size=0x{:016X}",
dst_addr, size);
return ResultInvalidMemoryRegion;
}
return ResultSuccess;
}
} // namespace
Result SetMemoryPermission(Core::System& system, VAddr address, u64 size, MemoryPermission perm) {
LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
perm);
// Validate address / size.
R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
R_UNLESS(size > 0, ResultInvalidSize);
R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
// Validate the permission.
R_UNLESS(IsValidSetMemoryPermission(perm), ResultInvalidNewMemoryPermission);
// Validate that the region is in range for the current process.
auto& page_table = system.Kernel().CurrentProcess()->PageTable();
R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
// Set the memory permission.
return page_table.SetMemoryPermission(address, size, perm);
}
Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attr) {
LOG_DEBUG(Kernel_SVC,
"called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
size, mask, attr);
// Validate address / size.
R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
R_UNLESS(size > 0, ResultInvalidSize);
R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
// Validate the attribute and mask.
constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached);
R_UNLESS((mask | attr) == mask, ResultInvalidCombination);
R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination);
// Validate that the region is in range for the current process.
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
// Set the memory attribute.
return page_table.SetMemoryAttribute(address, size, mask, attr);
}
Result SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask, u32 attr) {
return SetMemoryAttribute(system, address, size, mask, attr);
}
/// Maps a memory range into a different range.
Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
result.IsError()) {
return result;
}
return page_table.MapMemory(dst_addr, src_addr, size);
}
Result MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
return MapMemory(system, dst_addr, src_addr, size);
}
/// Unmaps a region that was previously mapped with svcMapMemory
Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
result.IsError()) {
return result;
}
return page_table.UnmapMemory(dst_addr, src_addr, size);
}
Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
return UnmapMemory(system, dst_addr, src_addr, size);
}
} // namespace Kernel::Svc
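
IsValidAddressRange above relies on well-defined unsigned wrap-around: address + size compares less than or equal to address exactly when the range is empty or overflows the 64-bit space. A standalone restatement with compile-time checks:

#include <cstdint>

constexpr bool IsValidAddressRange(uint64_t address, uint64_t size) {
    return address + size > address;  // unsigned wrap makes overflow detectable
}

static_assert(!IsValidAddressRange(0x1000, 0));                         // empty range
static_assert(!IsValidAddressRange(0xFFFF'FFFF'FFFF'F000ull, 0x2000));  // wraps around
static_assert(IsValidAddressRange(0x1000, 0x1000));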

Some files were not shown because too many files have changed in this diff.