diff --git a/.gitignore b/.gitignore index b6c550c..8634188 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,9 @@ /output /build /lib +/.vs/ +/.vscode/ +/.idea/ *.d *.a *.o diff --git a/libhydrosphere/CPPLINT.cfg b/libhydrosphere/CPPLINT.cfg index 99eb05d..e4ef613 100644 --- a/libhydrosphere/CPPLINT.cfg +++ b/libhydrosphere/CPPLINT.cfg @@ -8,5 +8,5 @@ set noparent -filter=-runtime/references +filter=-runtime/references,-runtime/int,-build/include_what_you_use exclude_files=external|build diff --git a/libhydrosphere/fix_codestyle b/libhydrosphere/fix_codestyle old mode 100644 new mode 100755 diff --git a/libhydrosphere/include/hs/diag/diag_api.hpp b/libhydrosphere/include/hs/diag/diag_api.hpp index a591797..61bc02d 100644 --- a/libhydrosphere/include/hs/diag/diag_api.hpp +++ b/libhydrosphere/include/hs/diag/diag_api.hpp @@ -23,31 +23,38 @@ namespace hs::diag { /** * \short Causes abnormal process termination. - * - * This send abort information to an availaible logger (currently only hs::svc::OutputDebugString) before calling hs::svc::Break. - * - * \remark In the future, this will propagate the abort to an observer for better debugging capabilities. + * + * This send abort information to an availaible logger (currently only + * hs::svc::OutputDebugString) before calling hs::svc::Break. + * + * \remark In the future, this will propagate the abort to an observer for + * better debugging capabilities. */ void Abort(const char *failed_condition, const char *file_name, const char *function_name, int line_number) noexcept; /** * \short Causes abnormal process termination with a custom message. - * - * This send abort information to an availaible logger (currently only hs::svc::OutputDebugString) before calling hs::svc::Break. - * - * \remark In the future, this will propagate the abort to an observer for better debugging capabilities. + * + * This send abort information to an availaible logger (currently only + * hs::svc::OutputDebugString) before calling hs::svc::Break. + * + * \remark In the future, this will propagate the abort to an observer for + * better debugging capabilities. */ void Abort(const char *failed_condition, const char *file_name, const char *function_name, int line_number, const char *format, ...) noexcept; /** - * \short Causes abnormal process termination with a hs::Result and a custom message. - * - * This send abort information to an availaible logger (currently only hs::svc::OutputDebugString) before calling hs::svc::Break. - * - * \remark In the future, this will propagate the abort to an observer for better debugging capabilities. + * \short Causes abnormal process termination with a hs::Result and a custom + * message. + * + * This send abort information to an availaible logger (currently only + * hs::svc::OutputDebugString) before calling hs::svc::Break. + * + * \remark In the future, this will propagate the abort to an observer for + * better debugging capabilities. */ void Abort(const char *failed_condition, const char *file_name, const char *function_name, int line_number, const hs::Result *result, diff --git a/libhydrosphere/include/hs/diag/diag_macro.hpp b/libhydrosphere/include/hs/diag/diag_macro.hpp index e85324e..97a0bc5 100644 --- a/libhydrosphere/include/hs/diag/diag_macro.hpp +++ b/libhydrosphere/include/hs/diag/diag_macro.hpp @@ -22,34 +22,37 @@ /** * \def __HS_ABORT_CONDITIONAL(condition, ...) - * \short Calls hs::diag::Abort with a given formatable message if the condition evaluates to false. 
- * \remark The formatable message is striped out in release builds. -*/ + * \short Calls hs::diag::Abort with a given formatable message if the condition + * evaluates to false. \remark The formatable message is striped out in release + * builds. + */ /** * \def __HS_DEBUG_ASSERT(condition) - * \short Calls hs::diag::Abort if the condition evaluates to false in debug builds. -*/ + * \short Calls hs::diag::Abort if the condition evaluates to false in debug + * builds. + */ /** * \def __HS_ABORT() * \short Unconditionally calls hs::diag::Abort. -*/ + */ /** * \def __HS_ASSERT(condition) * \short Calls hs::diag::Abort if the condition evaluates to false. -*/ + */ /** * \def __HS_ABORT_UNLESS_NOT_NULL(ptr) * \short Calls hs::diag::Abort if the given ptr is null. -*/ + */ /** * \def __HS_ABORT_CONDITIONAL_RESULT(condition, result) - * \short Calls hs::diag::Abort with a given result if the condition evaluates to false. -*/ + * \short Calls hs::diag::Abort with a given result if the condition evaluates + * to false. + */ #ifdef HYDROSPHERE_DEBUG_DIAG #define __HS_ABORT_CONDITIONAL(condition, ...) \ @@ -65,14 +68,12 @@ #define __HS_DEBUG_ASSERT(condition, ...) #endif -#define __HS_ABORT() \ - __HS_ABORT_CONDITIONAL(false, "Unknown Hydrosphere abort") +#define __HS_ABORT() __HS_ABORT_CONDITIONAL(false, "Unknown Hydrosphere abort") #define __HS_ASSERT(condition) \ __HS_ABORT_CONDITIONAL(condition, "Assertion failed") -#define __HS_ABORT_UNLESS_NOT_NULL(ptr) \ - __HS_ASSERT(ptr != nullptr) +#define __HS_ABORT_UNLESS_NOT_NULL(ptr) __HS_ASSERT(ptr != nullptr) -#define __HS_ABORT_CONDITIONAL_RESULT(condition, result) \ +#define __HS_ABORT_CONDITIONAL_RESULT(condition, result) \ __HS_ABORT_CONDITIONAL(condition, &result, "Assertion failed") /** diff --git a/libhydrosphere/include/hs/hs_config.hpp b/libhydrosphere/include/hs/hs_config.hpp index 71a7f63..223cf24 100644 --- a/libhydrosphere/include/hs/hs_config.hpp +++ b/libhydrosphere/include/hs/hs_config.hpp @@ -12,9 +12,11 @@ #ifdef __aarch64__ #define HYDROSPHERE_TARGET_ARCH_NAME aarch64 #define HYDROSPHERE_TARGET_AARCH64 1 +#define __HS_CPU_PTR_ALIGN 0x10 #elif __arm__ #define HYDROSPHERE_TARGET_ARCH_NAME aarch32 #define HYDROSPHERE_TARGET_AARCH32 1 +#define __HS_CPU_PTR_ALIGN 0x4 #else #error "Cannot determine the target architecture!" #endif diff --git a/libhydrosphere/include/hs/hs_macro.hpp b/libhydrosphere/include/hs/hs_macro.hpp index 11db822..2fe51d3 100644 --- a/libhydrosphere/include/hs/hs_macro.hpp +++ b/libhydrosphere/include/hs/hs_macro.hpp @@ -21,8 +21,8 @@ #define __HS_ATTRIBUTE_ALIGNED(align) __attribute__((aligned(align))) #define __HS_ASM __asm__ -#define __HS_DISALLOW_COPY(TypeName) \ - /** \private */ \ +#define __HS_DISALLOW_COPY(TypeName) \ + /** \private */ \ TypeName(const TypeName&) = delete #define __HS_DISALLOW_ASSIGN(TypeName) \ /** \private */ \ diff --git a/libhydrosphere/include/hs/hs_result.hpp b/libhydrosphere/include/hs/hs_result.hpp index 6431e46..6a06b79 100644 --- a/libhydrosphere/include/hs/hs_result.hpp +++ b/libhydrosphere/include/hs/hs_result.hpp @@ -24,9 +24,9 @@ class Result { public: /** - * \short Construct a Result from a raw value. - * \param[in] value The raw result value that defines the new Result. - */ + * \short Construct a Result from a raw value. + * \param[in] value The raw result value that defines the new Result. 
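A short usage sketch for the abort/assert macros documented above may help; the function and parameter names are illustrative, and the debug-only check disappears when HYDROSPHERE_DEBUG_DIAG is not defined, as the hunk above describes.

#include <stddef.h>
#include <hs/diag/diag_macro.hpp>
#include <hs/hs_result.hpp>

void ProcessBuffer(void *buffer, size_t size, const hs::Result &open_result) {
    // Abort and report the failing hs::Result if a previous call failed.
    __HS_ABORT_CONDITIONAL_RESULT(open_result.Ok(), open_result);

    // Abort if the caller handed us a null pointer.
    __HS_ABORT_UNLESS_NOT_NULL(buffer);

    // Checked in debug builds only; compiled out otherwise.
    __HS_DEBUG_ASSERT(size != 0);
}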
+ */ explicit Result(uint32_t value) noexcept : value(value) {} /** @@ -35,7 +35,8 @@ class Result { inline bool Ok() const noexcept { return value == Success; } /** - * \short Check if the raw result code value doesn't match the Result::Success value + * \short Check if the raw result code value doesn't match the + * Result::Success value */ inline bool Err() const noexcept { return !Ok(); } diff --git a/libhydrosphere/include/hs/mem.hpp b/libhydrosphere/include/hs/mem.hpp new file mode 100644 index 0000000..63131e5 --- /dev/null +++ b/libhydrosphere/include/hs/mem.hpp @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2019 Hydrosphère Developers + * + * Licensed under the Apache License, Version 2.0 or the MIT license + * , at your + * option. This file may not be copied, modified, or distributed + * except according to those terms. + */ + +#pragma once + +#include +#include +#include +#include +#include diff --git a/libhydrosphere/include/hs/mem/memory_allocator.hpp b/libhydrosphere/include/hs/mem/memory_allocator.hpp new file mode 100644 index 0000000..966144c --- /dev/null +++ b/libhydrosphere/include/hs/mem/memory_allocator.hpp @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2019 Hydrosphère Developers + * + * Licensed under the Apache License, Version 2.0 or the MIT license + * , at your + * option. This file may not be copied, modified, or distributed + * except according to those terms. + */ + +#pragma once +#include + +#include +#include +#include +#include + +namespace hs::mem { +/** + * \short Impelements a memory allocator that maps a given block + * of memory and manages it. + */ +class MemoryAllocator { + public: + /** + * \short Constructs a new MemoryAllocator. + * \param[in] start_address The start address of the memory region. + * \param[in] region_size The size of the memory region. + */ + MemoryAllocator(void *start_address, size_t region_size) noexcept + : free_list_(), start_address_(start_address), + region_size_(region_size) { + used_memory_ = 0; + + MemoryBlock *first_block = reinterpret_cast( + start_address); + // Enforce the constructor is called to avoid using uninialized data. + new (first_block) MemoryBlock(); + + first_block->size = region_size - sizeof(MemoryBlock); + free_list_.push_front(*first_block); + } + + /** + * \short Constructs a new MemoryAllocator. + * \param[in] start_address The start address of the memory region. + * \param[in] end_address The end address of the memory region. + */ + MemoryAllocator(void *start_address, void *end_address) noexcept + : MemoryAllocator( + start_address, + static_cast(static_cast(end_address) - + static_cast(start_address))) {} + + /** + * \short Allocates memory on the mapped region. + * \param[in] size The size of memory to allocate. + * \param[in] alignment The alignment to use. Defaults to 16. + * \return Pointer to the allocated memory block. + */ + void *Allocate(size_t size, size_t alignment = __HS_CPU_PTR_ALIGN) + noexcept { + if (size == 0 || alignment == 0) { + return nullptr; + } + + // Ensure CPU ptr alignment + size = memory_align(memory_align(size, alignment), __HS_CPU_PTR_ALIGN); + + void *ptr = nullptr; + + for (auto it = free_list_.begin(); it != free_list_.end(); ++it) { + // FIXME: Really really dirty, change this! 
+ hs::util::IntrusiveList::const_iterator cit = + hs::util::IntrusiveList::const_iterator(it); + + MemoryBlock &free_block = *it; + + __HS_DEBUG_ASSERT(free_block.size != 0); + + if (free_block.size < size) { + continue; + } + + size_t remaining_bytes = free_block.size - size; + + // Found a block that fit corectly. + if (remaining_bytes >= sizeof(MemoryBlock) || + remaining_bytes >= 0) { + char *block_data_start = reinterpret_cast(&free_block) + + sizeof(MemoryBlock); + + if (remaining_bytes > sizeof(MemoryBlock)) { + // We have enough space for the block and data, we split + // our block. + free_block.size -= remaining_bytes; + + MemoryBlock *new_block = reinterpret_cast( + block_data_start + free_block.size); + // Enforce the constructor is called to avoid using + // uninialized data. + new (new_block) MemoryBlock(); + new_block->size = remaining_bytes - sizeof(MemoryBlock); + + free_list_.insert(cit, *new_block); + } + + ptr = block_data_start; + free_list_.remove(*cit); + break; + } + } + + return ptr; + } + + /** + * \short Frees allocated memory. + * \param[in] pointer A pointer to the memory block to free. + */ + void Free(void *pointer) noexcept { + if (pointer == nullptr) { + return; + } + + MemoryBlock *block = GetMemoryBlockFromUserAddress(pointer); + + // Invalid pointer + if (block == nullptr) { + return; + } + + // If the free list is empty, add the block to it! + if (free_list_.empty()) { + free_list_.push_front(*block); + } else { + MemoryBlock *front_free_block = &free_list_.front(); + + // If the block is before the first element of the free list + if (front_free_block < block) { + // If our block is just before the first element, we replace + // the first element + if (block->IsJustAfterMemoryBlock(front_free_block)) { + block->size += front_free_block->size; + free_list_.remove(*front_free_block); + } + + free_list_.push_front(*block); + + return; + } + + // Search for the position to insert ourself. + auto target_position = free_list_.cbegin(); + + for (; target_position != free_list_.cend(); ++target_position) { + const MemoryBlock *tmp = &*target_position; + if (tmp > block) { + break; + } + } + + // Insert the our block in the free list. + free_list_.insert(target_position, *block); + + // If our block is just before the our target position, we replace + // the first element. + // TODO(Kaenbyō): Also try to merge chunk before the block that is + // being freed. 
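The MemoryAllocator introduced above carves a caller-provided region into MemoryBlock headers followed by user data, serving Allocate() from a free list and returning blocks to it in Free(). A minimal usage sketch, assuming a statically reserved region (the buffer name and sizes are illustrative):

#include <hs/mem/memory_allocator.hpp>

alignas(0x10) static char g_heap_region[0x10000];

void Example() {
    // The allocator places its first MemoryBlock header at the start of the
    // region and tracks the remainder as free space.
    hs::mem::MemoryAllocator allocator(g_heap_region, sizeof(g_heap_region));

    // Requested sizes are rounded up to the requested alignment and then to
    // __HS_CPU_PTR_ALIGN (0x10 on aarch64, 0x4 on aarch32).
    void *buffer = allocator.Allocate(0x80);
    if (buffer != nullptr) {
        // ... use the 0x80-byte block ...
        allocator.Free(buffer);
    }
}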
+ if (block->IsJustAfterMemoryBlock(&*target_position)) { + block->size += front_free_block->size; + free_list_.erase(target_position); + } + } + } + + private: + struct MemoryBlock : public hs::util::IntrusiveListElement<> { + size_t size; + char reserved[4]; + + bool IsJustAfterMemoryBlock(const MemoryBlock *other) const { + const char *other_raw = reinterpret_cast(other); + const char *this_raw = reinterpret_cast(this); + + return other_raw + other->size == this_raw; + } + }; + + #if HYDROSPHERE_TARGET_AARCH64 + static_assert(sizeof(MemoryBlock) == 0x20, "MemoryBlock size isn't right"); + #elif HYDROSPHERE_TARGET_AARCH32 + static_assert(sizeof(MemoryBlock) == 0x10, "MemoryBlock size isn't right"); + #endif + + hs::util::IntrusiveList free_list_; + + void *start_address_; + + size_t region_size_; + + size_t used_memory_; + + constexpr size_t memory_align(size_t size, size_t alignment) const { + return (size + alignment - 1) & ~(alignment - 1); + } + + MemoryBlock *GetMemoryBlockFromUserAddress(void *pointer) noexcept { + char *block_position = static_cast(pointer) - + sizeof(MemoryBlock); + char *start_address = static_cast(start_address_); + + // Check the pointer sanity before accepting it + if (start_address_ < block_position || + block_position > start_address + region_size_) { + return nullptr; + } + + return reinterpret_cast(block_position); + } +}; +} // namespace hs::mem diff --git a/libhydrosphere/include/hs/mem/memory_api.hpp b/libhydrosphere/include/hs/mem/memory_api.hpp new file mode 100644 index 0000000..7f53d0a --- /dev/null +++ b/libhydrosphere/include/hs/mem/memory_api.hpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2019 Hydrosphère Developers + * + * Licensed under the Apache License, Version 2.0 or the MIT license + * , at your + * option. This file may not be copied, modified, or distributed + * except according to those terms. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace hs::mem { +template struct memory_allocator_delete { + Alloc &allocator; + // Default constructor + constexpr explicit memory_allocator_delete(Alloc &allocator) noexcept : + allocator(allocator) {} + + template memory_allocator_delete( + const memory_allocator_delete& d) noexcept : + allocator(d.allocator) {} + + void operator()(T* ptr) const { + ptr->~T(); + allocator.Free(ptr); + } +}; + +template + unique_ptr> allocate_unique(Alloc& a, + Args&&... args) { + + // First we allocate T. + T *data = reinterpret_cast(a.Allocate(sizeof(T))); + + // Allocation might fail so assert in that case. + __HS_ASSERT(data != nullptr); + + // Then we call the constructor using args sent by the user. + new (data) T(hs::util::forward(args)...); + + // Finally construct the unique_ptr. + return unique_ptr>(data, + memory_allocator_delete(a)); +} + +template + shared_ptr< + T, + memory_allocator_delete, + memory_allocator_delete + > allocate_shared(Alloc& a, Args&&... args) { + // First we allocate T. + T *data = reinterpret_cast(a.Allocate(sizeof(T))); + + // Allocation might fail so assert in that case. + __HS_ASSERT(data != nullptr); + + // Then we call the constructor using args sent by the user. + new (data) T(hs::util::forward(args)...); + + // Finally construct the shared_ptr. 
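allocate_unique above ties the allocator to the smart-pointer layer: it allocates raw storage, placement-constructs T with forwarded arguments, and attaches a memory_allocator_delete so destruction returns the storage to the same allocator. A sketch of a typical call (Vec2 is an illustrative type, not part of the library):

#include <hs/mem.hpp>

struct Vec2 {
    float x;
    float y;
    Vec2(float x, float y) : x(x), y(y) {}
};

void Example(hs::mem::MemoryAllocator &allocator) {
    // The deleter stores a reference to the allocator, so the allocator has
    // to outlive the returned unique_ptr.
    auto point = hs::mem::allocate_unique<Vec2>(allocator, 1.0f, 2.0f);
    point->y += point->x;
}   // ~unique_ptr runs Vec2::~Vec2 and hands the block back to the allocator.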
+ return shared_ptr, + memory_allocator_delete>( + data, + memory_allocator_delete(a), + memory_allocator_delete(a), + a); +} + + + +} // namespace hs::mem diff --git a/libhydrosphere/include/hs/mem/memory_default_delete.hpp b/libhydrosphere/include/hs/mem/memory_default_delete.hpp new file mode 100644 index 0000000..aa2e890 --- /dev/null +++ b/libhydrosphere/include/hs/mem/memory_default_delete.hpp @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2019 Hydrosphère Developers + * + * Licensed under the Apache License, Version 2.0 or the MIT license + * , at your + * option. This file may not be copied, modified, or distributed + * except according to those terms. + */ + +#pragma once + +namespace hs::mem { +template struct default_delete { + // Default constructor + constexpr default_delete() noexcept = default; + + template default_delete(const default_delete& d) noexcept; + + void operator()(T* ptr) const { + delete ptr; + } +}; +} // namespace hs::mem diff --git a/libhydrosphere/include/hs/mem/shared_ptr.hpp b/libhydrosphere/include/hs/mem/shared_ptr.hpp new file mode 100644 index 0000000..5ee644b --- /dev/null +++ b/libhydrosphere/include/hs/mem/shared_ptr.hpp @@ -0,0 +1,314 @@ +/* + * Copyright (c) 2019 Hydrosphère Developers + * + * Licensed under the Apache License, Version 2.0 or the MIT license + * , at your + * option. This file may not be copied, modified, or distributed + * except according to those terms. + */ + +#pragma once + +#include + +#include +#include + +namespace hs::mem { +template +class shared_ptr_count { + public: + shared_ptr_count(long *ref_count, DeleterRefCount &d) : + ref_count_(ref_count), deleter_ref_count_(d) { + if (ref_count_ != nullptr) { + *ref_count_ = 0; + } + } + + shared_ptr_count(const shared_ptr_count &count) = default; + + void swap(shared_ptr_count &count) noexcept { + auto temp = ref_count_; + ref_count_ = count.ref_count_; + count.ref_count_ = temp; + + auto temp_deleter = deleter_ref_count_; + deleter_ref_count_ = count.deleter_ref_count_; + count.deleter_ref_count_ = deleter_ref_count_; + } + + long use_count() const noexcept { + return (ref_count_ != nullptr) ? *ref_count_ : 0; + } + + template + void acquire(U *ptr) { + if (ptr == nullptr) return; + + // If the ref_count isn't allocated, we are always sure that we don't + // have some custom allocator so we use the standard new. + if (ref_count_ == nullptr) + ref_count_ = new long(1); // may throw std::bad_alloc (?) + else + ++(*ref_count_); + } + + template + void release(D &deleter, U *ptr) noexcept { + if (ref_count_ == nullptr) return; + + --(*ref_count_); + if (*ref_count_ <= 0) { + deleter(ptr); + deleter_ref_count_(ref_count_); + } + ref_count_ = nullptr; + } + + private: + // Internal reference counter. + long *ref_count_; + DeleterRefCount &deleter_ref_count_; +}; + +/** + * \short A smart pointer that retains shared ownership of an object through a + * pointer. + */ +template , + class DeleterRefCount = hs::mem::default_delete> +class shared_ptr { + public: + typedef T* pointer; + typedef T element_type; + typedef Deleter deleter_type; + + /** + * \short Constructs a new shared_ptr from a nullptr. + */ + shared_ptr() noexcept : native_ptr_(nullptr), deleter_(), count_(nullptr) {} + + /** + * \short Constructs a new shared_ptr given a raw pointer. + * \param[in] ptr The raw pointer to manage. + */ + explicit shared_ptr(pointer ptr) : deleter_(), count_(nullptr) { + acquire(ptr); + } + + /** + * \short Constructs a new shared_ptr given a raw pointer and a destructor. 
+ * \param[in] ptr The raw pointer to manage. + * \param[in] d The destructor to use when disposing of the pointer. + */ + explicit shared_ptr(pointer ptr, Deleter d) + : deleter_(d), count_(nullptr) { + acquire(ptr); + } + + /** + * \short Constructs a new shared_ptr given a raw pointer, a destructor and an allocator. + * \param[in] ptr The raw pointer to manage. + * \param[in] d The destructor to use when disposing of the pointer. + * \param[in] d2 The destructor to use when disposing of the refcount. + * \param[in] alloc The allocator to use when allocating the refcount. + */ + template + shared_ptr(pointer ptr, Deleter d, DeleterRefCount d2, Alloc &alloc) + : deleter_(d), + count_(reinterpret_cast(alloc.Allocate(sizeof(long))), d2) { + acquire(ptr); + } + + /** + * \short Constructs a new shared_ptr to share ownership. + * \param[in] ptr A shared_ptr to take the reference count from. + * \param[in] p The raw pointer to acquire ownership of. + * \warning This should only be used for pointer casts as it doesn't manage + * two separate and pointers! + */ + template + shared_ptr(const shared_ptr &ptr, pointer p) + : deleter_(ptr.deleter_), count_(ptr.count_) { + acquire(p); + } + + /** + * \short Constructs a new shared_ptr that converts from another pointer + * type. + * \param[in] ptr The shared_ptr to use. + */ + template + explicit shared_ptr(const shared_ptr &ptr) noexcept + : deleter_(ptr.deleter_), count_(ptr.count_) { + acquire(static_cast::element_type *>( + ptr.native_ptr_)); + } + + /** + * \short Constructs a new sàhared_ptr by the copy-and-swap idiom. + * \param[in] ptr The shared_ptr to use. + */ + shared_ptr(const shared_ptr &ptr) noexcept + : deleter_(ptr.deleter_), count_(ptr.count_) { + acquire(ptr.native_ptr_); + } + + /** + * \short Destructs the owned object if no more shared_ptrs link to it. + */ + inline ~shared_ptr() noexcept { release(); } + + /** + * \short Assigns the shared_ptr. + * \param[in] ptr The shared_ptr that should be assigned. + */ + shared_ptr &operator=(shared_ptr ptr) noexcept { + swap(ptr); + return *this; + } + + /** + * \short Releases ownership of the managed object. + */ + inline void reset() noexcept { release(); } + + /** + * \short Replaces the managed object. + * \param[in] p The pointer to replace the contents with. + */ + void reset(pointer p) { + release(); + acquire(p); // May throw std::bad_alloc (?) + } + + /** + * \short Swaps the managed objects. + * \param[in] ptr The pointer to exchange the contents with. + */ + void swap(shared_ptr &ptr) noexcept { + auto temp = native_ptr_; + native_ptr_ = ptr.native_ptr_; + ptr.native_ptr_ = temp; + + count_.swap(ptr.native_ptr_); + } + + // Reference count operations: + + /** + * \short Checks if the stored pointer is not null. + */ + explicit operator bool() const noexcept { return 0 < count_.use_count(); } + + /** + * \short Checks whether the managed object is managed only by the current + * shared_ptr instance. + */ + bool unique() const noexcept { return 1 == count_.use_count(); } + + /** + * \short Returns the number of shared_ptr objects referring to the same + * managed object. + */ + long use_count() const noexcept { return count_.use_count(); } + + // Underlying pointer operations: + + /** + * \short Dereferences the stored pointer. + */ + T &operator*() const noexcept { + return *native_ptr_; + } + + /** + * \short Provides access to the stored pointer. + */ + T *operator->() const noexcept { + return native_ptr_; + } + + /** + * \short Returns the stored pointer. 
+ */ + pointer get() const noexcept { return native_ptr_; } + + private: + pointer native_ptr_; + deleter_type deleter_; + + shared_ptr_count count_; + + + // This allows pointer_cast functions to share the + // reference count between different shared_ptr types. + template + friend class shared_ptr; + + void acquire(pointer p) { + count_.acquire(p); // May throw std::bad_alloc (?) + native_ptr_ = p; + } + + void release() noexcept { + count_.release(deleter_, native_ptr_); + native_ptr_ = nullptr; + } +}; + +// Comparison operators. + +template +inline bool operator==(const shared_ptr &l, + const shared_ptr &r) noexcept { + return l.get() == r.get(); +} + +template +inline bool operator!=(const shared_ptr &l, + const shared_ptr &r) noexcept { + return l.get() != r.get(); +} + +template +inline bool operator<=(const shared_ptr &l, + const shared_ptr &r) noexcept { + return l.get() <= r.get(); +} + +template +inline bool operator<(const shared_ptr &l, const shared_ptr &r) noexcept { + return l.get() < r.get(); +} + +template +inline bool operator>=(const shared_ptr &l, + const shared_ptr &r) noexcept { + return l.get() >= r.get(); +} + +template +inline bool operator>(const shared_ptr &l, const shared_ptr &r) noexcept { + return l.get() > r.get(); +} + +// Static cast of shared_ptr. +template +shared_ptr static_pointer_cast(const shared_ptr &ptr) noexcept { + return shared_ptr( + ptr, static_cast::element_type *>(ptr.get())); +} + +// Dynamic cast of shared_ptr. +template +shared_ptr dynamic_pointer_cast(const shared_ptr &ptr) noexcept { + T *p = dynamic_cast::element_type *>(ptr.get()); + if (p == nullptr) + return shared_ptr(); + else + return shared_ptr(ptr, p); +} +} // namespace hs::mem diff --git a/libhydrosphere/include/hs/mem/unique_ptr.hpp b/libhydrosphere/include/hs/mem/unique_ptr.hpp new file mode 100644 index 0000000..dd25be0 --- /dev/null +++ b/libhydrosphere/include/hs/mem/unique_ptr.hpp @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2019 Hydrosphère Developers + * + * Licensed under the Apache License, Version 2.0 or the MIT license + * , at your + * option. This file may not be copied, modified, or distributed + * except according to those terms. + */ + +#pragma once + +#include + +namespace hs::mem { +/** + * \short A smart pointer that owns and manages another object through a pointer + * and disposes of that object when the unique_ptr goes out of scope. + */ +template > +class unique_ptr { + public: + typedef T* pointer; + typedef T element_type; + typedef Deleter deleter_type; + + /** + * \short Constructs a new unique_ptr from a nullptr. + */ + unique_ptr() noexcept : native_ptr_(nullptr), deleter_() {} + + /** + * \short Constructs a new unique_ptr given a raw pointer. + * \param[in] ptr The pointer to be wrapped. + */ + explicit unique_ptr(T *ptr) noexcept : native_ptr_(ptr), deleter_() {} + + /** + * \short Constructs a new unique_ptr given a raw pointer and a destructor. + * \param[in] ptr The pointer to be wrapped. + * \param[in] d The destructor to use when disposing of the pointer. + */ + explicit unique_ptr(T *ptr, Deleter d) noexcept : native_ptr_(ptr), + deleter_(d) {} + + /** + * \short Constructs a new unique_ptr from an existing unique_ptr. + * \param[in] ptr The unique_ptr to use. + */ + unique_ptr(const unique_ptr &ptr) noexcept : native_ptr_(ptr.native_ptr_), + deleter_(ptr.deleter_) { + const_cast(ptr).native_ptr_ = + nullptr; // const_cast to force ownership transfer. + } + + /** + * \short Destructs the managed object if such is present. 
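shared_ptr needs heap storage for its reference count, which is why the four-argument constructor above takes an allocator and a second deleter for the count; allocate_shared wires all of that up in one call. A minimal sketch (Counter is an illustrative type):

#include <hs/diag/diag_macro.hpp>
#include <hs/mem.hpp>

struct Counter {
    int hits = 0;
};

void Example(hs::mem::MemoryAllocator &allocator) {
    // Both the Counter object and the long used as its reference count are
    // taken from (and later returned to) the same allocator.
    auto first = hs::mem::allocate_shared<Counter>(allocator);

    // Copies share ownership of the object and of the count.
    auto second = first;
    second->hits += 1;
    __HS_ASSERT(first.use_count() == 2);
}   // The last owner to go out of scope destroys Counter and frees the count.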
+ */ + inline ~unique_ptr() noexcept { destroy(); } + + /** + * \short Assigns the unique_ptr. + * \param[in] ptr The unique_ptr that should be assigned. + */ + unique_ptr &operator=(unique_ptr ptr) noexcept { + swap(ptr); + return *this; + } + + /** + * \short Destroys the managed object. + */ + inline void reset() noexcept { destroy(); } + + /** + * \short Replaces the managed object. + * \param[in] ptr The pointer to replace the contents with. + */ + void reset(pointer ptr) noexcept { + destroy(); + native_ptr_ = ptr; + } + + /** + * Swaps the managed objects. + * \param[in] ptr The pointer to exchange the contents with. + */ + void swap(unique_ptr &ptr) noexcept { + auto temp = native_ptr_; + native_ptr_ = ptr.native_ptr_; + ptr.native_ptr_ = temp; + } + + /** + * \short Returns a pointer to the managed object and releases the + * ownership. + */ + inline pointer release() noexcept { + pointer temp = native_ptr_; + native_ptr_ = nullptr; + return temp; + } + + /** + * \short Checks if there is an associated managed object. + */ + inline explicit operator bool() const noexcept { + return nullptr != native_ptr_; + } + + // Underlying native_ptr_ operations: + + /** + * \short Dereferences pointer to the managed object. + */ + inline T &operator*() const noexcept { + return *native_ptr_; + } + + /** + * \short Provides access to a pointer to managed object. + */ + inline T *operator->() const noexcept { + return native_ptr_; + } + + /** + * \short Returns a pointer to the managed object. + */ + inline pointer get() const noexcept { return native_ptr_; } + + private: + pointer native_ptr_; + deleter_type deleter_; + + inline void destroy() noexcept { + deleter_(native_ptr_); + + native_ptr_ = nullptr; + } + + inline void release() const noexcept { native_ptr_ = nullptr; } +}; + +// Comparison operators. + +template +inline bool operator==(const unique_ptr &l, + const unique_ptr &r) noexcept { + return l.get() == r.get(); +} + +template +inline bool operator!=(const unique_ptr &l, + const unique_ptr &r) noexcept { + return l.get() != r.get(); +} + +template +inline bool operator<=(const unique_ptr &l, + const unique_ptr &r) noexcept { + return l.get() <= r.get(); +} + +template +inline bool operator<(const unique_ptr &l, const unique_ptr &r) noexcept { + return l.get() < r.get(); +} + +template +inline bool operator>=(const unique_ptr &l, + const unique_ptr &r) noexcept { + return l.get() >= r.get(); +} + +template +inline bool operator>(const unique_ptr &l, const unique_ptr &r) noexcept { + return l.get() > r.get(); +} +} // namespace hs::mem diff --git a/libhydrosphere/include/hs/os/os_api.hpp b/libhydrosphere/include/hs/os/os_api.hpp index 7ca4cb4..150942f 100644 --- a/libhydrosphere/include/hs/os/os_api.hpp +++ b/libhydrosphere/include/hs/os/os_api.hpp @@ -26,7 +26,6 @@ namespace hs::os { */ svc::Handle GetCurrentThreadHandle() noexcept; - /** * \short Get the pseudo thread handle. * \return The pseudo thread handle. diff --git a/libhydrosphere/include/hs/os/os_barrier_api.hpp b/libhydrosphere/include/hs/os/os_barrier_api.hpp index e48778d..fb02954 100644 --- a/libhydrosphere/include/hs/os/os_barrier_api.hpp +++ b/libhydrosphere/include/hs/os/os_barrier_api.hpp @@ -28,7 +28,7 @@ namespace hs::os { /** * \short This is the context of a barrier. - * + * * See \ref barrier_api "Barrier API" for usages. **/ struct Barrier { @@ -72,11 +72,12 @@ static_assert(hs::util::is_pod::value, "Barrier isn't pod"); /** * \short Initialize a Barrier for \c num_to_wait participating threads. 
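One behaviour of the unique_ptr above deserves a usage note: its copy constructor transfers ownership and leaves the source holding nullptr (the const_cast in the hunk above exists for exactly this). A small sketch using allocate_unique from earlier:

#include <hs/diag/diag_macro.hpp>
#include <hs/mem.hpp>

void Example(hs::mem::MemoryAllocator &allocator) {
    auto owner = hs::mem::allocate_unique<int>(allocator, 7);

    // Copying transfers ownership: afterwards `owner` holds nullptr and
    // only `taken` will release the allocation.
    decltype(owner) taken(owner);
    __HS_ASSERT(!owner);
    __HS_ASSERT(*taken == 7);
}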
* - * \remark The set of participating threads is the first \c num_to_wait threads to arrive at the synchronization point. + * \remark The set of participating threads is the first \c num_to_wait threads + * to arrive at the synchronization point. * * \param[in] barrier A pointer to a Barrier. * \param[in] number_to_wait The number of threads this barrier need to wait on. - * + * * \pre ``number_to_wait`` > 0 * \pre ``barrier`` is uninitialized. * \post ``barrier`` is initialized. @@ -87,7 +88,7 @@ void InitializeBarrier(Barrier *barrier, uint32_t number_to_wait) noexcept; * \short Blocks and arrive at the barrier's synchronization point. * * \param[in] barrier A pointer to a Barrier. - * + * * \pre ``barrier`` is initialized. * \post The thread arrived at the barrier's synchronization point. */ @@ -97,7 +98,7 @@ void AwaitBarrier(Barrier *barrier) noexcept; * \short Finalize a Barrier. * * \param[in] barrier A pointer to a Barrier. - * + * * \pre ``barrier`` is initialized. * \post ``barrier`` is uninitialized. */ diff --git a/libhydrosphere/include/hs/os/os_condition_variable_api.hpp b/libhydrosphere/include/hs/os/os_condition_variable_api.hpp index a3ef9f0..23371f2 100644 --- a/libhydrosphere/include/hs/os/os_condition_variable_api.hpp +++ b/libhydrosphere/include/hs/os/os_condition_variable_api.hpp @@ -28,7 +28,7 @@ namespace hs::os { /** * \short This is the context of a condition variable. - * + * * See \ref condition_variable_api "Condition Variable API" for usages. **/ struct ConditionVariable { @@ -51,7 +51,8 @@ struct ConditionVariable { }; /** - * \short Type that indicates whether a hs::os::WaitTimeoutConditionVariable returned because of a timeout or not. + * \short Type that indicates whether a hs::os::WaitTimeoutConditionVariable + * returned because of a timeout or not. */ enum class ConditionVariableStatus { /** @@ -65,10 +66,10 @@ enum class ConditionVariableStatus { }; static_assert(sizeof(ConditionVariable) == 0x8, -"invalid ConditionVariable size"); + "invalid ConditionVariable size"); static_assert(hs::util::is_pod::value, -"ConditionVariable isn't pod"); + "ConditionVariable isn't pod"); /** * \short Initialize a ConditionVariable. @@ -109,12 +110,14 @@ void BroadcastConditionVariable(ConditionVariable *condvar) noexcept; * \param[in] mutex A pointer to a locked Mutex. * \param[in] timeout The number of nanoseconds before timing out. * - * The execution of the current thread is blocked during ``timeout``, or until signaled (if the latter happens first). - * \pre ``condvar`` must be initialized and ``mutex`` must be locked. - * \post The thread was signaled and was unblocked, or, the ``timeout`` expired. + * The execution of the current thread is blocked during ``timeout``, or until + * signaled (if the latter happens first). \pre ``condvar`` must be initialized + * and ``mutex`` must be locked. \post The thread was signaled and was + * unblocked, or, the ``timeout`` expired. */ -ConditionVariableStatus WaitTimeoutConditionVariable( - ConditionVariable *condvar, Mutex *mutex, int64_t timeout) noexcept; +ConditionVariableStatus WaitTimeoutConditionVariable(ConditionVariable *condvar, + Mutex *mutex, + int64_t timeout) noexcept; /** * \short Wait until signaled. * @@ -125,8 +128,7 @@ ConditionVariableStatus WaitTimeoutConditionVariable( * \pre ``condvar`` must be initialized and ``mutex`` must be locked. * \post The thread was signaled and was unblocked. 
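A usage sketch for the Barrier API documented above: three worker threads meet at a common synchronization point. Names and the participant count are illustrative, and finalizing the barrier once the workers are done is assumed to happen elsewhere.

#include <hs/os/os_barrier_api.hpp>

static hs::os::Barrier g_stage_barrier;

void SetUpBarrier() {
    // The first three threads to arrive form the set of participants.
    hs::os::InitializeBarrier(&g_stage_barrier, 3);
}

void WorkerBody() {
    // ... produce this thread's share of the stage ...

    // Blocks until all three participants have reached this point.
    hs::os::AwaitBarrier(&g_stage_barrier);

    // ... all participants may now rely on each other's results ...
}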
*/ -void WaitConditionVariable(ConditionVariable *condvar, - Mutex *mutex) noexcept; +void WaitConditionVariable(ConditionVariable *condvar, Mutex *mutex) noexcept; /** * \short Finalize a ConditionVariable. diff --git a/libhydrosphere/include/hs/os/os_condition_variable_impl.hpp b/libhydrosphere/include/hs/os/os_condition_variable_impl.hpp index 91390a0..4b78f4a 100644 --- a/libhydrosphere/include/hs/os/os_condition_variable_impl.hpp +++ b/libhydrosphere/include/hs/os/os_condition_variable_impl.hpp @@ -19,8 +19,9 @@ namespace hs::os { /** * \short Condition Variable implementation. - * - * \remark A condition variable is an object able to block the calling thread until notified to resume. + * + * \remark A condition variable is an object able to block the calling thread + * until notified to resume. */ class ConditionVariableImpl { private: @@ -29,35 +30,38 @@ class ConditionVariableImpl { public: /** * \short Signal one. - * + * * Unblocks one of the threads currently waiting for this condition. */ void Signal(void) noexcept; /** * \short Signal all (Broadcast). - * + * * Unblocks all threads currently waiting for this condition. */ void Broadcast(void) noexcept; /** * \short Wait until signaled. - * - * The execution of the current thread (which shall have entered the critial section) is blocked until signaled. + * + * The execution of the current thread (which shall have entered the critial + * section) is blocked until signaled. */ void Wait(CriticalSection *critical_section) noexcept; /** * \short Wait for timeout or until signaled. - * - * The execution of the current thread (which shall have entered the critial section) is blocked during \c timeout, or until signaled (if the latter happens first). + * + * The execution of the current thread (which shall have entered the critial + * section) is blocked during \c timeout, or until signaled (if the latter + * happens first). */ bool WaitTimeout(CriticalSection *critical_section, int64_t timeout) noexcept; }; static_assert(hs::util::is_pod::value, -"ConditionVariableImpl isn't pod"); + "ConditionVariableImpl isn't pod"); } // namespace hs::os diff --git a/libhydrosphere/include/hs/os/os_critical_section.hpp b/libhydrosphere/include/hs/os/os_critical_section.hpp index 069cb22..bfe8579 100644 --- a/libhydrosphere/include/hs/os/os_critical_section.hpp +++ b/libhydrosphere/include/hs/os/os_critical_section.hpp @@ -19,7 +19,7 @@ namespace hs::os { /** * \short Critical Section implementation. - * + * * \remark CriticalSection ensures mutual exclusion of access. */ class CriticalSection { @@ -42,13 +42,14 @@ class CriticalSection { */ void Leave() noexcept; - /** - * \short Returns true if the current thread owns the lock of the critical section. - */ + /** + * \short Returns true if the current thread owns the lock of the critical + * section. + */ bool IsLockedByCurrentThread() noexcept; }; static_assert(hs::util::is_pod::value, -"CriticalSection isn't pod"); + "CriticalSection isn't pod"); } // namespace hs::os diff --git a/libhydrosphere/include/hs/os/os_kernel_event_api.hpp b/libhydrosphere/include/hs/os/os_kernel_event_api.hpp index 2e32710..c0cfcd6 100644 --- a/libhydrosphere/include/hs/os/os_kernel_event_api.hpp +++ b/libhydrosphere/include/hs/os/os_kernel_event_api.hpp @@ -14,8 +14,8 @@ #include #include #include -#include #include +#include namespace hs::os { /** @@ -30,7 +30,7 @@ namespace hs::os { /** * \short This is the context of a kernel event. - * + * * See \ref kernel_event_api "Kernel Event API" for usages. 
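The condition-variable functions above follow the usual monitor pattern: the mutex guards the predicate and the wait call atomically releases it while blocking. A sketch of a producer/consumer handshake; initialization of the mutex and condition variable is assumed to have happened already, and the mutex unlock function (not part of these hunks) is assumed to be named UnlockMutex.

#include <hs/os/os_condition_variable_api.hpp>
#include <hs/os/os_mutex_api.hpp>

static hs::os::Mutex g_work_mutex;
static hs::os::ConditionVariable g_work_condvar;
static bool g_work_ready = false;

void Consumer() {
    hs::os::LockMutex(&g_work_mutex);
    // Re-check the predicate after every wakeup; WaitConditionVariable
    // releases the mutex while blocked and reacquires it before returning.
    while (!g_work_ready) {
        hs::os::WaitConditionVariable(&g_work_condvar, &g_work_mutex);
    }
    g_work_ready = false;
    hs::os::UnlockMutex(&g_work_mutex);  // assumed name, not shown above
}

void Producer() {
    hs::os::LockMutex(&g_work_mutex);
    g_work_ready = true;
    hs::os::BroadcastConditionVariable(&g_work_condvar);
    hs::os::UnlockMutex(&g_work_mutex);  // assumed name, not shown above
}

WaitTimeoutConditionVariable works the same way but additionally returns ConditionVariableStatus::TimeOut when the nanosecond timeout expires before a signal arrives.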
**/ struct KernelEvent { @@ -42,7 +42,8 @@ struct KernelEvent { /** * \private - * \short True if the KernelEvent must be automatically cleared after a wait operation. + * \short True if the KernelEvent must be automatically cleared after a wait + * operation. */ bool is_auto_clear; @@ -66,27 +67,26 @@ struct KernelEvent { }; static_assert(sizeof(KernelEvent) == 0x14, "invalid KernelEvent size"); -static_assert(hs::util::is_pod::value, - "KernelEvent isn't pod"); +static_assert(hs::util::is_pod::value, "KernelEvent isn't pod"); /** * \short Create a KernelEvent. * * \param[in] event A pointer to a KernelEvent. - * \param[in] is_auto_clear True if the KernelEvent must be automatically cleared after a wait operation. + * \param[in] is_auto_clear True if the KernelEvent must be automatically + * cleared after a wait operation. * * \pre ``event`` is uninitialized. * \post ``event`` is initialized. */ -hs::Result CreateKernelEvent(KernelEvent *event, - bool is_auto_clear) noexcept; +hs::Result CreateKernelEvent(KernelEvent *event, bool is_auto_clear) noexcept; /** * \short Load a KernelEvent from raw handles. - * + * * \param[in] event A pointer to a KernelEvent. * \param[in] readable_handle An Optional readable handle. * \param[in] writable_handle An Optional writable handle. - * + * * \pre ``event`` is uninitialized. * \pre ``readable_handle`` or ``writable_handle`` contain a value. * \post ``event`` is initialized. @@ -108,7 +108,7 @@ void WaitKernelEvent(KernelEvent *event) noexcept; /** * \short Get the signal state of a KernelEvent. - * + * * \param[in] event A pointer to a KernelEvent. * \pre ``event`` is initialized. * \pre ``event`` contains a readable handle. diff --git a/libhydrosphere/include/hs/os/os_mutex_api.hpp b/libhydrosphere/include/hs/os/os_mutex_api.hpp index 6ada789..9b9f64d 100644 --- a/libhydrosphere/include/hs/os/os_mutex_api.hpp +++ b/libhydrosphere/include/hs/os/os_mutex_api.hpp @@ -13,8 +13,8 @@ #include #include #include -#include #include +#include namespace hs::os { /** @@ -28,7 +28,7 @@ namespace hs::os { /** * \short This is the context of a mutex. - * + * * See \ref mutex_api "Mutex API" for usages. **/ struct Mutex { @@ -87,7 +87,8 @@ void InitializeMutex(Mutex *mutex, bool is_recursive) noexcept; * \short Lock Mutex. * The calling thread locks the mutex, blocking if necessary. * - * \remark If the mutex wasn't initialized as recursive and if it is currently locked by the same thread calling this function, it produces a deadlock. + * \remark If the mutex wasn't initialized as recursive and if it is currently + * locked by the same thread calling this function, it produces a deadlock. * * \param[in] mutex A pointer to a Mutex. * @@ -104,7 +105,8 @@ void LockMutex(Mutex *mutex) noexcept; * \pre ``mutex`` is initialized. * \post The lock was acquired or is owned by another thread. * - * \return true if the function succeeds in locking the mutex for the thread. false otherwise. + * \return true if the function succeeds in locking the mutex for the thread. + * false otherwise. */ bool TryLockMutex(Mutex *mutex) noexcept; diff --git a/libhydrosphere/include/hs/os/os_thread_api.hpp b/libhydrosphere/include/hs/os/os_thread_api.hpp index 1cbb9e7..d701424 100644 --- a/libhydrosphere/include/hs/os/os_thread_api.hpp +++ b/libhydrosphere/include/hs/os/os_thread_api.hpp @@ -43,7 +43,8 @@ const size_t THREAD_NAME_SIZE = 0x20; */ enum class ThreadState { /** - * \short The Thread is uninitialized and can be initialized by using hs::os::CreateThread. 
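A sketch for the KernelEvent API above: one component creates an auto-clear event and waits on its readable side, while the writable side is signaled elsewhere (for example from a process it has been shared with). Only calls visible in these hunks are used.

#include <hs/os/os_kernel_event_api.hpp>

static hs::os::KernelEvent g_ready_event;

hs::Result SetUpEvent() {
    // Auto-clear: the event resets itself after each successful wait.
    return hs::os::CreateKernelEvent(&g_ready_event, true);
}

void WaitUntilReady() {
    // Blocks until the writable side of the event has been signaled.
    hs::os::WaitKernelEvent(&g_ready_event);
}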
+ * \short The Thread is uninitialized and can be initialized by using + * hs::os::CreateThread. */ Uninitialized, /** @@ -66,7 +67,7 @@ enum class ThreadState { /** * \short This is the context of a thread. - * + * * See \ref thread_api "Thread API" for usages. **/ struct Thread : public hs::util::IntrusiveListElement<> { @@ -108,13 +109,15 @@ struct Thread : public hs::util::IntrusiveListElement<> { /** * \private - * \short The memory mirror of the ``original_thread_stack`` mapped in the Stack region. + * \short The memory mirror of the ``original_thread_stack`` mapped in the + * Stack region. */ void *mapped_thread_stack; /** * \private - * \short True if the ``original_thread_stack`` is mapped in the Stack region. + * \short True if the ``original_thread_stack`` is mapped in the Stack + * region. */ bool is_alias_thread_stack_mapped; @@ -148,11 +151,14 @@ struct Thread : public hs::util::IntrusiveListElement<> { * * \param[in] thread A pointer to a Thread. * \param[in] thread_entrypoint The entrypoint of the Thread. - * \param[in] argument The argument to pass to the entrypoint when starting the Thread. - * \param[in] stack A pointer to a memory region that will be used as a stack by the Thread. - * \param[in] stack_size The size of the stack (must be page aligned). - * \param[in] priority The priority of the Thread (0x2C is the usual priority of the main thread. 0x3B on core 0 to 2 and 0x3F on core 3 is a special priority that enables preemptive multithreading). - * \param[in] cpuid The ID of the CPU core to use (-2 means the default core of the current process). + * \param[in] argument The argument to pass to the entrypoint when starting the + * Thread. \param[in] stack A pointer to a memory region that will be used as a + * stack by the Thread. \param[in] stack_size The size of the stack (must be + * page aligned). \param[in] priority The priority of the Thread (0x2C is the + * usual priority of the main thread. 0x3B on core 0 to 2 and 0x3F on core 3 is + * a special priority that enables preemptive multithreading). \param[in] cpuid + * The ID of the CPU core to use (-2 means the default core of the current + * process). * * \pre ``thread`` state is ThreadState::Uninitialized. * \pre ``thread_entrypoint`` is not a null pointer. @@ -173,8 +179,10 @@ hs::Result CreateThread(Thread *thread, * * This function has the following behaviours: * - * - If the ``thread`` state is ThreadState::Initialized, the thread is started and signaled as ThreadState::Destroyed (The thread will imediately exit). - * - It waits for the Thread to exit and ensures that the Thread stack mirror is unmapped if needed. + * - If the ``thread`` state is ThreadState::Initialized, the thread is started + * and signaled as ThreadState::Destroyed (The thread will imediately exit). + * - It waits for the Thread to exit and ensures that the Thread stack mirror is + * unmapped if needed. * * \param[in] thread A pointer to a Thread. * \pre ``thread`` state is **not** ThreadState::Uninitialized. @@ -203,7 +211,8 @@ void WaitThread(Thread *thread) noexcept; /** * \short Yield to other threads. * - * This gives a hint to the scheduler that the current thread is willing to yield its current use of a CPU core. + * This gives a hint to the scheduler that the current thread is willing to + * yield its current use of a CPU core. */ void YieldThread(void) noexcept; @@ -228,7 +237,7 @@ Thread *GetCurrentThread(void) noexcept; /** * \short Change a Thread priority. - * + * * This return the previous thread priority. 
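A sketch of the thread lifecycle documented above. The full CreateThread signature is not visible in this hunk, so the argument order below simply follows the parameter documentation (entrypoint, argument, stack, stack size, priority, CPU id) and should be treated as an assumption; actually starting the thread uses API not shown here.

#include <hs/os/os_thread_api.hpp>

alignas(0x1000) static char g_worker_stack[0x4000];  // page aligned, size illustrative
static hs::os::Thread g_worker;

void WorkerEntry(void *argument) {
    (void)argument;  // unused in this sketch
    // ... thread body ...
}

hs::Result SpawnWorker(void *argument) {
    // 0x2C mirrors the main thread priority; -2 selects the process's
    // default CPU core, as described above.
    return hs::os::CreateThread(&g_worker, WorkerEntry, argument,
                                g_worker_stack, sizeof(g_worker_stack),
                                0x2C, -2);
}

void JoinWorker() {
    // Blocks until the worker has exited.
    hs::os::WaitThread(&g_worker);
}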
* \pre ``thread`` state is **not** ThreadState::Uninitialized. * \post ``thread`` priority is now ``priority``. @@ -242,7 +251,6 @@ int ChangeThreadPriority(Thread *thread, int priority) noexcept; */ int GetOriginalThreadPriority(Thread *thread) noexcept; - /** * \short Get the current Thread priority. * diff --git a/libhydrosphere/include/hs/os/os_tls.hpp b/libhydrosphere/include/hs/os/os_tls.hpp index 751c627..41d258e 100644 --- a/libhydrosphere/include/hs/os/os_tls.hpp +++ b/libhydrosphere/include/hs/os/os_tls.hpp @@ -35,7 +35,6 @@ class ThreadLocalStorage { */ static ThreadLocalStorage *GetThreadLocalStorage() noexcept; - /** * Get the thread context of the current thread. */ diff --git a/libhydrosphere/include/hs/os/os_user_event_api.hpp b/libhydrosphere/include/hs/os/os_user_event_api.hpp index 360404b..69e9849 100644 --- a/libhydrosphere/include/hs/os/os_user_event_api.hpp +++ b/libhydrosphere/include/hs/os/os_user_event_api.hpp @@ -29,7 +29,7 @@ namespace hs::os { /** * \short This is the context of a user event. - * + * * See \ref user_event_api "User Event API" for usages. **/ struct UserEvent { @@ -53,7 +53,8 @@ struct UserEvent { /** * \private - * \short True if the UserEvent must be automatically cleared after a wait operation. + * \short True if the UserEvent must be automatically cleared after a wait + * operation. */ bool is_auto_clear; @@ -77,8 +78,9 @@ static_assert(hs::util::is_pod::value, "UserEvent isn't pod"); * \short Create a UserEvent. * * \param[in] event A pointer to a UserEvent. - * \param[in] is_signaled_at_init True if the UserEvent must be automatically signaled after the initialization. - * \param[in] is_auto_clear True if the UserEvent must be automatically cleared after a wait operation. + * \param[in] is_signaled_at_init True if the UserEvent must be automatically + * signaled after the initialization. \param[in] is_auto_clear True if the + * UserEvent must be automatically cleared after a wait operation. * * \pre ``event`` is uninitialized. * \post ``event`` is initialized. @@ -97,7 +99,7 @@ void WaitUserEvent(UserEvent *event) noexcept; /** * \short Get the signal state of a UserEvent. - * + * * \param[in] event A pointer to a UserEvent. * \pre ``event`` is initialized. * \return true if the ``event`` has been signaled. 
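A sketch for the UserEvent API above. The creation function itself is outside the visible hunks, so the CreateUserEvent name is an assumption made by analogy with CreateKernelEvent; the two flags follow the parameter documentation (not signaled at initialization, auto-cleared after a wait).

#include <hs/os/os_user_event_api.hpp>

static hs::os::UserEvent g_frame_event;

void SetUpFrameEvent() {
    // Assumed name: the creation function is documented above but its
    // declaration is not part of these hunks.
    hs::os::CreateUserEvent(&g_frame_event, false, true);
}

void WaitForFrame() {
    // Blocks until the event is signaled; auto-clear then resets it.
    hs::os::WaitUserEvent(&g_frame_event);
}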
diff --git a/libhydrosphere/include/hs/svc/svc_api.hpp b/libhydrosphere/include/hs/svc/svc_api.hpp index e8c1d5e..a92c484 100644 --- a/libhydrosphere/include/hs/svc/svc_api.hpp +++ b/libhydrosphere/include/hs/svc/svc_api.hpp @@ -21,6 +21,13 @@ namespace hs { namespace svc { + +inline hs::Result SetHeapSize(uintptr_t *out_address, + size_t heap_size) noexcept { + return hs::svc::HYDROSPHERE_TARGET_ARCH_NAME::SetHeapSize(out_address, + heap_size); +} + inline hs::Result ArbitrateLock(hs::svc::Handle owner_thread_handle, uintptr_t lock_address, hs::svc::Handle requester_handle) noexcept { diff --git a/libhydrosphere/include/hs/svc/svc_types.hpp b/libhydrosphere/include/hs/svc/svc_types.hpp index 01fecdd..36ff2b5 100644 --- a/libhydrosphere/include/hs/svc/svc_types.hpp +++ b/libhydrosphere/include/hs/svc/svc_types.hpp @@ -58,7 +58,6 @@ class Handle { static_assert(hs::util::is_pod::value, "Handle isn't pod"); - inline bool operator==(Handle a, Handle b) noexcept { return a.GetValue() == b.GetValue(); } @@ -113,6 +112,8 @@ enum class InterruptType { // TODO(Kaenbyō): populate this enum class InfoType { + HeapRegionBaseAddr = 4, + HeapRegionSize = 5, AddressSpaceBase = 12, AddressSpaceSize = 13, StackRegionBase = 14, diff --git a/libhydrosphere/include/hs/util/util_api.hpp b/libhydrosphere/include/hs/util/util_api.hpp index 9fe3847..68f30b5 100644 --- a/libhydrosphere/include/hs/util/util_api.hpp +++ b/libhydrosphere/include/hs/util/util_api.hpp @@ -55,7 +55,7 @@ inline int SNPrintf(char *str, size_t size, const char *format, ...) noexcept { /** * \def __HS_DEBUG_LOG(...) * \short printf helper to hs::svc::OutputDebugString -*/ + */ #define __HS_DEBUG_LOG(...) \ { \ char log_buf[0x200]; \ @@ -66,7 +66,7 @@ inline int SNPrintf(char *str, size_t size, const char *format, ...) noexcept { /** * \def __HS_DEBUG_LOG_VARGS(...) * \short vprintf helper to hs::svc::OutputDebugString -*/ + */ #define __HS_DEBUG_LOG_VARGS(format, va_args) \ { \ char log_buf[0x200]; \ diff --git a/libhydrosphere/include/hs/util/util_intrusive_list.hpp b/libhydrosphere/include/hs/util/util_intrusive_list.hpp index 4878975..1118f62 100644 --- a/libhydrosphere/include/hs/util/util_intrusive_list.hpp +++ b/libhydrosphere/include/hs/util/util_intrusive_list.hpp @@ -16,12 +16,14 @@ namespace hs::util { /** - * \short default_intrusive_tag as defined in [P0406-r1](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0406r1.html#list) + * \short default_intrusive_tag as defined in + * [P0406-r1](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0406r1.html#list) */ struct DefaultIntrusiveTag; /** - * \short intrusive_list_element as defined in [P0406-r1](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0406r1.html#list) + * \short intrusive_list_element as defined in + * [P0406-r1](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0406r1.html#list) */ template class IntrusiveListElement { @@ -108,7 +110,8 @@ class IntrusiveListElement { }; /** - * \short Partial implementation of intrusive_list as defined in [P0406-r1](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0406r1.html#list) + * \short Partial implementation of intrusive_list as defined in + * [P0406-r1](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0406r1.html#list) * \remark This doesn't implement any methods related to the std namespace. */ template @@ -127,7 +130,8 @@ class IntrusiveList { typedef value_type& reference; /** - * \short The type of a const reference to an element the iterator can point to. 
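The new svc::SetHeapSize wrapper and the HeapRegion InfoType values above pair naturally with the MemoryAllocator added earlier: map a heap from the kernel, then manage it with the allocator. A sketch (the 2 MiB size is illustrative and has to respect the kernel's heap-size granularity):

#include <hs/mem/memory_allocator.hpp>
#include <hs/svc/svc_api.hpp>

hs::Result RunWithHeap() {
    uintptr_t heap_address = 0;
    const size_t heap_size = 0x200000;  // illustrative

    hs::Result result = hs::svc::SetHeapSize(&heap_address, heap_size);
    if (result.Err()) {
        return result;
    }

    // Manage the freshly mapped heap region with the allocator added above.
    hs::mem::MemoryAllocator allocator(reinterpret_cast<void *>(heap_address),
                                       heap_size);
    void *scratch = allocator.Allocate(0x100);
    // ... use scratch ...
    allocator.Free(scratch);
    return result;
}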
+ * \short The type of a const reference to an element the iterator can point + * to. */ typedef const value_type& const_reference; @@ -140,7 +144,8 @@ class IntrusiveList { typedef int size_type; /** - * \short Type to express the result of subtracting one iterator from another. + * \short Type to express the result of subtracting one iterator from + * another. */ typedef int difference_type; // We don't have the standard library. @@ -276,24 +281,28 @@ class IntrusiveList { // Iterators /** - * \short Returns an iterator pointing to the first element in the container. + * \short Returns an iterator pointing to the first element in the + * container. */ iterator begin() noexcept { return iterator(root.GetNext()); } /** - * \short Returns an const_iterator pointing to the first element in the container. + * \short Returns an const_iterator pointing to the first element in the + * container. */ const_iterator begin() const noexcept { return const_iterator(root.GetNext()); } /** - * \short Returns an iterator referring to the past-the-end element in the container. + * \short Returns an iterator referring to the past-the-end element in the + * container. */ iterator end() noexcept { return iterator(&root); } /** - * \short Returns an const_iterator referring to the past-the-end element in the container. + * \short Returns an const_iterator referring to the past-the-end element in + * the container. */ const_iterator end() const noexcept { return const_iterator(&root); } @@ -304,17 +313,21 @@ class IntrusiveList { const_reverse_iterator rend() const noexcept;*/ /** - * \short Returns a const_iterator pointing to the first element in the container. - * - * \warning If the container is empty, the returned iterator value shall not be dereferenced. + * \short Returns a const_iterator pointing to the first element in the + * container. + * + * \warning If the container is empty, the returned iterator value shall not + * be dereferenced. */ const_iterator cbegin() const noexcept { return this->begin(); } /** - * \short Returns a const_iterator pointing to the past-the-end element in the container. - * - * If the container is empty, this function returns the same as IntrusiveList::cbegin. - * \warning The value returned shall not be dereferenced. + * \short Returns a const_iterator pointing to the past-the-end element in + * the container. + * + * If the container is empty, this function returns the same as + * IntrusiveList::cbegin. \warning The value returned shall not be + * dereferenced. */ const_iterator cend() const noexcept { return this->end(); } // We don't have the standard library. @@ -341,8 +354,9 @@ class IntrusiveList { /** * \short Returns a reference to the first element in the container. * - * Unlike member IntrusiveList::begin, which returns an iterator to this same element, this function returns a direct reference. - * \warning Calling this function on an empty container causes undefined behavior. + * Unlike member IntrusiveList::begin, which returns an iterator to this + * same element, this function returns a direct reference. \warning Calling + * this function on an empty container causes undefined behavior. */ reference front() noexcept { return IntrusiveListElement::template GetElement( @@ -352,8 +366,9 @@ class IntrusiveList { /** * \short Returns a reference to the first element in the container. * - * Unlike member IntrusiveList::cbegin, which returns an iterator to this same element, this function returns a direct reference. 
- * \warning Calling this function on an empty container causes undefined behavior. + * Unlike member IntrusiveList::cbegin, which returns an iterator to this + * same element, this function returns a direct reference. \warning Calling + * this function on an empty container causes undefined behavior. */ const_reference front() const noexcept { return IntrusiveListElement::template GetElement( @@ -363,8 +378,9 @@ class IntrusiveList { /** * \short Returns a reference to the last element in the container. * - * Unlike member IntrusiveList::cbegin, which returns an iterator to this same element, this function returns a direct reference. - * \warning Calling this function on an empty container causes undefined behavior. + * Unlike member IntrusiveList::cbegin, which returns an iterator to this + * same element, this function returns a direct reference. \warning Calling + * this function on an empty container causes undefined behavior. */ reference back() noexcept { return IntrusiveListElement::template GetElement( @@ -374,8 +390,9 @@ class IntrusiveList { /** * \short Returns a reference to the last element in the container. * - * Unlike member IntrusiveList::cbegin, which returns an iterator to this same element, this function returns a direct reference. - * \warning Calling this function on an empty container causes undefined behavior. + * Unlike member IntrusiveList::cbegin, which returns an iterator to this + * same element, this function returns a direct reference. \warning Calling + * this function on an empty container causes undefined behavior. */ const_reference back() const noexcept { return IntrusiveListElement::template GetElement( @@ -383,31 +400,37 @@ class IntrusiveList { } /** - * \short Adds a new element at the start of the container after its current first element. - * + * \short Adds a new element at the start of the container after its current + * first element. + * * \remark This effectively increases the container size by one. */ void push_front(T& x) noexcept { this->root.LinkNext(&x); } /** - * \short Adds a new element at the end of the container after its current last element. - * + * \short Adds a new element at the end of the container after its current + * last element. + * * \remark This effectively increases the container size by one. */ void push_back(T& x) noexcept { this->root.LinkPrev(&x); } /** - * \short Removes the last element in the container, effectively reducing the container size by one. + * \short Removes the last element in the container, effectively reducing + * the container size by one. */ void pop_back() noexcept { this->root.GetPrev()->Unlink(); } /** - * \short Removes the first element in the container, effectively reducing the container size by one. + * \short Removes the first element in the container, effectively reducing + * the container size by one. */ void pop_front() noexcept { this->root.GetNext()->Unlink(); } /** - * \short The container is extended by inserting new elements before the element at the specified position, effectively increasing the container size by the number of elements inserted. + * \short The container is extended by inserting new elements before the + * element at the specified position, effectively increasing the container + * size by the number of elements inserted. */ iterator insert(const_iterator position, T& x) noexcept { T* ref = const_cast(position.value); @@ -431,11 +454,12 @@ class IntrusiveList { /** * \short Removes from the container a range of elements. 
([first,last)) - * \remark This effectively reduces the container size by the number of elements removed. + * \remark This effectively reduces the container size by the number of + * elements removed. */ iterator erase(const_iterator position, const_iterator last) noexcept { if (position == last) { - return last; + return iterator(last); } iterator temp = iterator(position); @@ -445,7 +469,8 @@ class IntrusiveList { // void swap(IntrusiveList&) noexcept; /** - * \short Removes all elements from the container, leaving the container with a size of 0. + * \short Removes all elements from the container, leaving the container + * with a size of 0. */ void clear() noexcept { while (!this->empty()) { @@ -465,7 +490,15 @@ class IntrusiveList { void splice(const_iterator position, IntrusiveList&& x, const_iterator first, const_iterator last) noexcept;*/ - // void remove(const T& value); + void remove(const T& value) { + const_iterator v = cbegin(); + for (; v != cend(); ++v) { + if (&value == &*v) { + erase(v); + break; + } + } + } // template void remove_if(Predicate pred); // maybe uneeded as we don't have the standard library. diff --git a/libhydrosphere/include/hs/util/util_object_storage.hpp b/libhydrosphere/include/hs/util/util_object_storage.hpp index 160c474..c009f98 100644 --- a/libhydrosphere/include/hs/util/util_object_storage.hpp +++ b/libhydrosphere/include/hs/util/util_object_storage.hpp @@ -14,21 +14,23 @@ namespace hs::util { /** - * \short Obtains a POD type suitable to use as storage for an object of a size of at most Len bytes, aligned as specified by Align. - * + * \short Obtains a POD type suitable to use as storage for an object of a size + * of at most Len bytes, aligned as specified by Align. + * * The obtained type is aliased as member type aligned_storage::type. - * + * * \tparam Len The size of the storage object in bytes. * \tparam Align The alignment requested in bytes. - * + * * \pre Len shall not be zero. - * + * * \remark This is identical to libcxx's aligned_storage. */ template struct aligned_storage { /** - * \short A POD type suitable to store Len bytes, aligned as specified by Align. + * \short A POD type suitable to store Len bytes, aligned as specified by + * Align. */ struct type { alignas(Align) unsigned char data[Len]; @@ -77,7 +79,6 @@ struct ObjectStorage { inline T &operator*() noexcept { return Get(); } - inline T *operator->() noexcept { return GetPointer(); } }; } // namespace hs::util diff --git a/libhydrosphere/include/hs/util/util_optional.hpp b/libhydrosphere/include/hs/util/util_optional.hpp index b99605a..a8abcdc 100644 --- a/libhydrosphere/include/hs/util/util_optional.hpp +++ b/libhydrosphere/include/hs/util/util_optional.hpp @@ -18,7 +18,8 @@ namespace hs::util { * \short Obtains a POD type that manages an optional contained value. * \tparam T The object type that needs to be hold. */ -template< typename T> class Optional { +template +class Optional { private: /** * \short True if the Optional contains a value. @@ -35,21 +36,16 @@ template< typename T> class Optional { ObjectStorage value; public: - constexpr explicit operator bool() const noexcept { - return HasValue(); - } - + constexpr explicit operator bool() const noexcept { return HasValue(); } /** * \short Check whether the Optional contains a value. - */ - constexpr bool HasValue() const noexcept { - return has_value; - } + */ + constexpr bool HasValue() const noexcept { return has_value; } /** * \short Store the given value in the Optional. 
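The IntrusiveList above never owns or allocates its elements: each element embeds its links by deriving from IntrusiveListElement, and the newly added remove() unlinks a specific element by address. A small sketch (Job is an illustrative type):

#include <hs/util/util_intrusive_list.hpp>

// Elements carry their own links by deriving from IntrusiveListElement.
struct Job : public hs::util::IntrusiveListElement<> {
    int id;
};

static Job g_first;
static Job g_second;

void Example() {
    hs::util::IntrusiveList<Job> queue;

    g_first.id = 1;
    g_second.id = 2;
    queue.push_back(g_first);
    queue.push_back(g_second);

    for (auto it = queue.begin(); it != queue.end(); ++it) {
        // (*it).id is the element's own storage; the list holds no copies.
    }

    // remove(), added in this change, unlinks the matching element.
    queue.remove(g_first);
    queue.clear();
}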
-   * 
+   *
    * \param[in] new_value the new value to move inside the Optional.
    */
   constexpr void SetValue(T new_value) noexcept {
diff --git a/libhydrosphere/include/hs/util/util_std_new.hpp b/libhydrosphere/include/hs/util/util_std_new.hpp
index 8c211f6..9e5ef42 100644
--- a/libhydrosphere/include/hs/util/util_std_new.hpp
+++ b/libhydrosphere/include/hs/util/util_std_new.hpp
@@ -17,5 +17,5 @@
 inline void* operator new(size_t, void* __p) throw() { return __p; }
 inline void* operator new[](size_t, void* __p) throw() { return __p; }
 // Default placement versions of operator delete.
-inline void operator delete(void*, void*)throw() {}
+inline void operator delete(void*, void*) throw() {}
 inline void operator delete[](void*, void*) throw() {}
diff --git a/libhydrosphere/include/hs/util/util_template_api.hpp b/libhydrosphere/include/hs/util/util_template_api.hpp
index 1f4d52c..8583c31 100644
--- a/libhydrosphere/include/hs/util/util_template_api.hpp
+++ b/libhydrosphere/include/hs/util/util_template_api.hpp
@@ -14,9 +14,10 @@ namespace hs::util {
 /**
  * \short integral_constant wraps a static constant of specified type.
  * \remark This is a simple implementation of std::integral_constant.
- * \remark It is the base class for the C++ type traits. 
+ * \remark It is the base class for the C++ type traits.
  */
-template <class T, T v> struct integral_constant {
+template <class T, T v>
+struct integral_constant {
   static constexpr T value = v;
   typedef T value_type;
   // using injected-class-name
@@ -27,11 +28,24 @@ template <class T, T v> struct integral_constant {
 };

 /**
- * \short If T is a plain old data type, that is, both trivial and standard-layout, provides the member constant value equal true. For any other type, value is false.
- * \remark This is a simple implementation of std::is_pod using __is_pod from clang builtins.
+ * \short If T is a plain old data type, that is, both trivial and
+ * standard-layout, provides the member constant value equal to true. For any
+ * other type, value is false. \remark This is a simple implementation of
+ * std::is_pod using __is_pod from clang builtins.
  */
-template <class T> struct is_pod : public integral_constant<bool, __is_pod(T)>
-{};
+template <class T>
+struct is_pod : public integral_constant<bool, __is_pod(T)> {};

-}  // namespace hs::util
+template< class T > struct remove_reference {typedef T type;};
+template< class T > struct remove_reference<T&> {typedef T type;};
+template< class T > struct remove_reference<T&&> {typedef T type;};
+
+template <class T> T&& forward(typename remove_reference<T>::type& t) noexcept {
+  return static_cast<T&&>(t);
+}
+template <class T> T&& forward(typename remove_reference<T>::type&& t)
+    noexcept {
+  return static_cast<T&&>(t);
+}
+}  // namespace hs::util
diff --git a/libhydrosphere/source/common/compiler/memcpy.cpp b/libhydrosphere/source/common/compiler/memcpy.cpp
index 4df446b..7f1f6de 100644
--- a/libhydrosphere/source/common/compiler/memcpy.cpp
+++ b/libhydrosphere/source/common/compiler/memcpy.cpp
@@ -11,10 +11,15 @@
 #include 
 #include 

-// We define memcpy as we don't have any libraries that can provide it.
-// If there is any, as this is weak, it's going to be discared.
-extern "C" __HS_ATTRIBUTE_WEAK void *memcpy(
-    void *dst, const void *src, size_t len) {
+/**
+ * \short Copies len bytes from src directly to dst.
+ * \param[out] dst Pointer to the destination array where the content is to be
+ * copied.
+ * \param[in] src Pointer to the source of data to be copied.
+ * \param[in] len Number of bytes to copy.
+ */
+extern "C" __HS_ATTRIBUTE_WEAK void *memcpy(void *dst, const void *src,
+                                             size_t len) {
   const char *from = (const char *)src;
   char *to = reinterpret_cast<char *>(dst);

@@ -22,8 +27,13 @@ extern "C" __HS_ATTRIBUTE_WEAK void *memcpy(
   return dst;
 }

-extern "C" __HS_ATTRIBUTE_WEAK void *memset(
-    void *s, int c, size_t n) {
+/**
+ * \short Sets the first n bytes of s to c (interpreted as an unsigned char).
+ * \param[out] s Pointer to the block of memory to fill.
+ * \param[in] c Value to be set. Will be cast to unsigned char.
+ * \param[in] n Number of bytes to be set to the value.
+ */
+extern "C" __HS_ATTRIBUTE_WEAK void *memset(void *s, int c, size_t n) {
   unsigned char *p = reinterpret_cast<unsigned char *>(s);
   while (n--) *p++ = (unsigned char)c;
   return s;
diff --git a/libhydrosphere/source/common/init/initialization.cpp b/libhydrosphere/source/common/init/initialization.cpp
index cc89c34..4eea95a 100644
--- a/libhydrosphere/source/common/init/initialization.cpp
+++ b/libhydrosphere/source/common/init/initialization.cpp
@@ -42,7 +42,7 @@ __HS_ATTRIBUTE_VISIBILITY_HIDDEN void InitMainThread(
   tls_storage->SetThreadContext(&thread_list->GetMainThread());
 }

-extern"C" void hsMain(void);
+extern "C" void hsMain(void);

 void hs::init::Start(uint64_t thread_handle, uintptr_t argument_address,
                      void (*notify_exception_handler_ready)(),
diff --git a/libhydrosphere/source/common/os/os_condition_variable_api.cpp b/libhydrosphere/source/common/os/os_condition_variable_api.cpp
index c836285..b73198c 100644
--- a/libhydrosphere/source/common/os/os_condition_variable_api.cpp
+++ b/libhydrosphere/source/common/os/os_condition_variable_api.cpp
@@ -31,8 +31,9 @@ void BroadcastConditionVariable(ConditionVariable *condvar) noexcept {
   condvar->condition_variable.Broadcast();
 }

-ConditionVariableStatus WaitTimeoutConditionVariable(
-    ConditionVariable *condvar, Mutex *mutex, int64_t timeout) noexcept {
+ConditionVariableStatus WaitTimeoutConditionVariable(ConditionVariable *condvar,
+                                                     Mutex *mutex,
+                                                     int64_t timeout) noexcept {
   if (!condvar->condition_variable.WaitTimeout(&mutex->critical_section,
                                                timeout)) {
     return ConditionVariableStatus::TimeOut;
@@ -41,8 +42,7 @@ ConditionVariableStatus WaitTimeoutConditionVariable(
   return ConditionVariableStatus::NoTimeOut;
 }

-void WaitConditionVariable(ConditionVariable *condvar,
-    Mutex *mutex) noexcept {
+void WaitConditionVariable(ConditionVariable *condvar, Mutex *mutex) noexcept {
   condvar->condition_variable.Wait(&mutex->critical_section);
 }
diff --git a/libhydrosphere/source/common/os/os_condition_variable_impl.cpp b/libhydrosphere/source/common/os/os_condition_variable_impl.cpp
index 4cc6dc7..742d573 100644
--- a/libhydrosphere/source/common/os/os_condition_variable_impl.cpp
+++ b/libhydrosphere/source/common/os/os_condition_variable_impl.cpp
@@ -9,8 +9,8 @@
  */
 #include 
-#include 
 #include 
+#include 
 #include 
 #include 
diff --git a/libhydrosphere/source/common/os/os_critical_section.cpp b/libhydrosphere/source/common/os/os_critical_section.cpp
index 76fef35..54090a0 100644
--- a/libhydrosphere/source/common/os/os_critical_section.cpp
+++ b/libhydrosphere/source/common/os/os_critical_section.cpp
@@ -45,9 +45,9 @@ void CriticalSection::Enter() noexcept {
       continue;
     } else {
       hs::svc::ArbitrateLock(
-            hs::svc::Handle::FromRawValue(expected_value),
-            reinterpret_cast(&this->image),
-            self_thread_handle);
+          hs::svc::Handle::FromRawValue(expected_value),
+          reinterpret_cast(&this->image),
+          self_thread_handle);
     }
   }
 }
diff --git a/libhydrosphere/source/common/os/os_kernelevent_api.cpp b/libhydrosphere/source/common/os/os_kernelevent_api.cpp
index 11171ef..e3d19fb 100644
--- a/libhydrosphere/source/common/os/os_kernelevent_api.cpp
+++ b/libhydrosphere/source/common/os/os_kernelevent_api.cpp
@@ -18,12 +18,10 @@ enum KernelEventState {

 namespace hs::os {

-hs::Result CreateKernelEvent(KernelEvent *event,
-    bool is_auto_clear) noexcept {
+hs::Result CreateKernelEvent(KernelEvent *event, bool is_auto_clear) noexcept {
   svc::Handle readable_handle;
   svc::Handle writable_handle;
-  auto result =
-      hs::svc::CreateEvent(&writable_handle, &readable_handle);
+  auto result = hs::svc::CreateEvent(&writable_handle, &readable_handle);

   if (result.Ok()) {
     event->state = KernelEventState_Initialized;
@@ -48,8 +46,9 @@ void WaitKernelEvent(KernelEvent *event) noexcept {
   int32_t index;

   // Wait for the event to be signaled
-  while (hs::svc::WaitSynchronization(&index,
-      event->readable_handle.GetValuePointer(), 1, -1).Err()) {
+  while (hs::svc::WaitSynchronization(
+             &index, event->readable_handle.GetValuePointer(), 1, -1)
+             .Err()) {
   }

   if (event->is_auto_clear) {
diff --git a/libhydrosphere/source/common/os/os_thread_api.cpp b/libhydrosphere/source/common/os/os_thread_api.cpp
index 3014675..0b62545 100644
--- a/libhydrosphere/source/common/os/os_thread_api.cpp
+++ b/libhydrosphere/source/common/os/os_thread_api.cpp
@@ -75,7 +75,7 @@ __HS_ATTRIBUTE_VISIBILITY_HIDDEN hs::Result CreateThreadUnsafe(
       &thread_handle, reinterpret_cast(entry_point),
       reinterpret_cast(thread),
       reinterpret_cast(thread->mapped_thread_stack) +
-      thread->thread_stack_size,
+          thread->thread_stack_size,
       thread->priority, cpuid);
   if (result.Ok()) {
     thread->thread_handle = thread_handle;
@@ -162,8 +162,7 @@ hs::Result CreateThread(Thread *thread,
       reinterpret_cast(thread->mapped_thread_stack),
       reinterpret_cast(thread->original_thread_stack),
       thread->thread_stack_size);
-  hs::os::detail::g_StackAllocator->Free(
-      thread->mapped_thread_stack);
+  hs::os::detail::g_StackAllocator->Free(thread->mapped_thread_stack);
   }

   // TODO(Kaenbyō): incremental thread name (with the number of the thread)
@@ -206,8 +205,7 @@ void DestroyThread(Thread *thread) noexcept {
       reinterpret_cast(thread->mapped_thread_stack),
       reinterpret_cast(thread->original_thread_stack),
       thread->thread_stack_size);
-  hs::os::detail::g_StackAllocator->Free(
-      thread->mapped_thread_stack);
+  hs::os::detail::g_StackAllocator->Free(thread->mapped_thread_stack);
   thread->is_alias_thread_stack_mapped = false;
   thread->mapped_thread_stack = nullptr;
 }
@@ -241,8 +239,7 @@ void WaitThread(Thread *thread) noexcept {
       reinterpret_cast(thread->mapped_thread_stack),
       reinterpret_cast(thread->original_thread_stack),
       thread->thread_stack_size);
-  hs::os::detail::g_StackAllocator->Free(
-      thread->mapped_thread_stack);
+  hs::os::detail::g_StackAllocator->Free(thread->mapped_thread_stack);
   thread->is_alias_thread_stack_mapped = false;
   thread->mapped_thread_stack = nullptr;
 }
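
The IntrusiveList::remove introduced above erases by object identity: it compares addresses (&value == &*v) rather than calling operator==, and it never allocates or frees anything because the links live inside the element. A minimal stand-alone sketch of that semantic; the Node and List types below are illustrative only, not the library's IntrusiveListElement machinery:

#include <cassert>

// Hypothetical, simplified intrusive node: the links live inside the element.
struct Node {
  int payload = 0;
  Node *prev = nullptr;
  Node *next = nullptr;
};

struct List {
  Node root;  // sentinel; root.next is the first element
  List() { root.prev = root.next = &root; }

  void push_back(Node &n) {
    n.prev = root.prev;
    n.next = &root;
    root.prev->next = &n;
    root.prev = &n;
  }

  // Same contract as the remove() added in the diff: unlink the element
  // that *is* `value`, found by address, not by comparing payloads.
  void remove(const Node &value) {
    for (Node *it = root.next; it != &root; it = it->next) {
      if (it == &value) {
        it->prev->next = it->next;
        it->next->prev = it->prev;
        break;
      }
    }
  }
};

int main() {
  Node a, b;
  List list;
  list.push_back(a);
  list.push_back(b);
  list.remove(a);                // unlinks `a` itself, no allocation involved
  assert(list.root.next == &b);  // `b` is now the first element
}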
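
The remove_reference and forward templates added to util_template_api.hpp follow the usual std::forward shape, so callers can perfect-forward arguments without pulling in the standard library. A small stand-alone illustration of why both overloads are needed (the sink/wrapper names are illustrative, not from the library):

#include <cstdio>

template <class T> struct remove_reference      { typedef T type; };
template <class T> struct remove_reference<T&>  { typedef T type; };
template <class T> struct remove_reference<T&&> { typedef T type; };

// Lvalue overload: for lvalue arguments T deduces to U&, so T&& collapses to U&.
template <class T>
T&& forward(typename remove_reference<T>::type& t) noexcept {
  return static_cast<T&&>(t);
}
// Rvalue overload: keeps rvalues as rvalues when forwarding a temporary.
template <class T>
T&& forward(typename remove_reference<T>::type&& t) noexcept {
  return static_cast<T&&>(t);
}

void sink(const int &) { std::puts("lvalue"); }
void sink(int &&)      { std::puts("rvalue"); }

template <class T>
void wrapper(T &&arg) {
  sink(forward<T>(arg));  // preserves the caller's value category
}

int main() {
  int x = 1;
  wrapper(x);  // prints "lvalue"
  wrapper(2);  // prints "rvalue"
}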
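
memcpy and memset in source/common/compiler/memcpy.cpp are defined with __HS_ATTRIBUTE_WEAK, so a stronger definition with the same name (for example an optimized routine from another object file) silently wins at link time and the byte-by-byte fallback is discarded. A stand-alone sketch of that linker behaviour, assuming a GCC/Clang toolchain; the attribute below is the plain compiler builtin and my_memcpy is a made-up name, not the library macro or symbol:

// translation unit 1: fallback.cpp -- byte-by-byte fallback, marked weak
extern "C" __attribute__((weak)) void *my_memcpy(void *dst, const void *src,
                                                 unsigned long len) {
  char *to = static_cast<char *>(dst);
  const char *from = static_cast<const char *>(src);
  while (len--) *to++ = *from++;
  return dst;
}

// translation unit 2: optimized.cpp -- a strong definition with the same name
// overrides the weak one when both objects are linked together:
//
// extern "C" void *my_memcpy(void *dst, const void *src, unsigned long len) {
//   /* platform-specific optimized copy */
// }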
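
The placement operator new/delete in util_std_new.hpp presumably exist so that aligned_storage-backed wrappers such as ObjectStorage and Optional can construct a T in place inside raw storage without any heap. A minimal stand-alone model of that pattern, assuming nothing about the real ObjectStorage API; the Storage/Point names are illustrative:

#include <new>      // placement new declaration (the library ships its own)
#include <cstdio>

template <unsigned Len, unsigned Align>
struct aligned_storage {
  struct type { alignas(Align) unsigned char data[Len]; };
};

template <class T>
struct Storage {
  typename aligned_storage<sizeof(T), alignof(T)>::type raw;

  // Construct the object in place inside `raw`; no allocation happens.
  template <class... Args>
  T &Construct(Args... args) { return *new (&raw) T(args...); }

  T &Get() { return *reinterpret_cast<T *>(&raw); }
  void Destroy() { Get().~T(); }  // lifetime is managed manually
};

struct Point {
  int x, y;
  Point(int x_, int y_) : x(x_), y(y_) {}
};

int main() {
  Storage<Point> s;
  s.Construct(1, 2);
  std::printf("%d %d\n", s.Get().x, s.Get().y);  // prints "1 2"
  s.Destroy();
}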