diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b2742fa..a6de19f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,6 +1,6 @@ name: CI -on: [push, pull_request, release] +on: [ pull_request ] jobs: build: @@ -8,97 +8,101 @@ jobs: fail-fast: false matrix: config: - - { - name: linux-x64-clang-9, - os: ubuntu-18.04, - cxx: clang++-9, - cmake-build-type: Release - } - - { - name: macos-x64-gcc, - os: macos-10.15, - cxx: g++, - cmake-build-type: Release - } - - { - name: macos-x64-clang, - os: macos-10.15, - cxx: clang++, - cmake-build-type: Release - } - - { - name: linux-x64-clang-12-sanitize, - os: ubuntu-20.04, - cxx: clang++-12, - cxx-flags: "-fsanitize=address,undefined", - cmake-build-type: Release - } - - { - name: linux-x64-gcc-10-coverage, - os: ubuntu-20.04, - cxx: g++-10, - cxx-flags: --coverage, - gcov-tool: gcov-10, - cmake-build-type: Debug - } - - { - name: linux-x64-clang-11, - os: ubuntu-20.04, - cxx: clang++-11, - cmake-build-type: Release - } - - { - name: linux-x64-clang-12, - os: ubuntu-22.04, - cxx: clang++-12, - cmake-build-type: Release - } - - { - name: linux-x64-clang-13, - os: ubuntu-22.04, - cxx: clang++-13, - cmake-build-type: Release - } - - { - name: linux-x64-clang-14, - os: ubuntu-22.04, - cxx: clang++-14, - cmake-build-type: Release - } - - { - name: linux-x64-gcc-11, - os: ubuntu-22.04, - cxx: g++-11, - cmake-build-type: Release - } - name: ${{matrix.config.name}} + # These get queued but never actually run + #- name: macos-x64-gcc, + # os: macos-13.5, + # cxx: g++, + #- name: macos-x64-clang, + # os: macos-13.5, + # cxx: clang++, + + - name: linux-x64-clang-14 + os: ubuntu-22.04 + cxx: clang++-14 + + - name: linux-x64-clang-15 + os: ubuntu-22.04 + cxx: clang++-15 + + - name: linux-x64-clang-16-sanitize + os: ubuntu-22.04 + cxx: clang++-16 + cxx-flags: -fsanitize=undefined -fsanitize=address -fno-omit-frame-pointer -fno-optimize-sibling-calls + + - name: linux-x64-gcc-11-coverage + os: ubuntu-22.04 + cxx: g++-11 + cxx-flags: --coverage + gcov-tool: gcov-11 + + - name: linux-x64-gcc-12 + os: ubuntu-22.04 + cxx: g++-12 + + - name: linux-x64-gcc-13 + os: ubuntu-22.04 + cxx: g++-13 + runs-on: ${{matrix.config.os}} steps: - - uses: actions/checkout@v2 - - # Linux or macOS - - name: Install boost (Linux or macOS) - run: vcpkg install boost-test boost-container boost-interprocess - if: runner.os == 'Linux' || runner.os == 'macOS' - - - name: Configure CMake (Linux or macOS) - run: cmake -DCMAKE_BUILD_TYPE=${{matrix.config.cmake-build-type}} -DCMAKE_TOOLCHAIN_FILE="$VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake" -S ${{github.workspace}}/tests -B ${{github.workspace}}/build - env: - CXX: ${{matrix.config.cxx}} - CXXFLAGS: ${{matrix.config.cxx-flags}} - if: runner.os == 'Linux' || runner.os == 'macOS' - - - name: Build (Linux or macOS) - run: cmake --build ${{github.workspace}}/build --verbose - if: runner.os == 'Linux' || runner.os == 'macOS' - - - name: Test (Linux or macOS) - run: ${{github.workspace}}/build/tsl_sparse_map_tests - if: runner.os == 'Linux' || runner.os == 'macOS' - - - name: Coverage - run: | - sudo apt-get install -y lcov - lcov -c -b ${{github.workspace}}/include -d ${{github.workspace}}/build -o ${{github.workspace}}/coverage.info --no-external --gcov-tool ${{matrix.config.gcov-tool}} - bash <(curl -s https://codecov.io/bash) -f ${{github.workspace}}/coverage.info - if: ${{matrix.config.name == 'linux-x64-gcc-10-coverage'}} + - name: Add Repos for for gcc-13 and clang-16 + run: | + # 
gcc-13 + sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y + + # clang-16 + source /etc/os-release + echo "deb http://apt.llvm.org/${UBUNTU_CODENAME}/ llvm-toolchain-${UBUNTU_CODENAME}-16 main" | sudo tee /etc/apt/sources.list.d/llvm-16.list + curl https://apt.llvm.org/llvm-snapshot.gpg.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/llvm-16.gpg > /dev/null + + sudo apt-get update -y + if: runner.os == 'Linux' + + - name: Install compiler + id: install_cc + uses: rlalik/setup-cpp-compiler@v1.2 + with: + compiler: ${{ matrix.config.cxx }} + if: runner.os == 'Linux' + + - name: Check out sources + uses: actions/checkout@v3 + + - name: Install boost (Linux) + run: sudo apt-get install -y libboost-dev + if: runner.os == 'Linux' + + - name: Install boost (macOS) + run: vcpkg install boost-interprocess + if: runner.os == 'macOS' + + - name: Configure CMake (Linux) + run: cmake -DBUILD_TESTING=ON -DCMAKE_BUILD_TYPE=Debug -B build + env: + CXX: ${{matrix.config.cxx}} + CXXFLAGS: ${{matrix.config.cxx-flags}} + if: runner.os == 'Linux' + + - name: Configure CMake (macOS) + run: cmake -DBUILD_TESTING=ON -DCMAKE_BUILD_TYPE=Debug -DCMAKE_TOOLCHAIN_FILE="$VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake" -B build + env: + CXX: ${{matrix.config.cxx}} + CXXFLAGS: ${{matrix.config.cxx-flags}} + if: runner.os == 'macOS' + + - name: Build + working-directory: build + run: cmake --build . --verbose --parallel 2 + if: runner.os == 'Linux' || runner.os == 'macOS' + + - name: Test + working-directory: build + run: ctest --parallel 2 --verbose + if: runner.os == 'Linux' || runner.os == 'macOS' + + - name: Coverage + run: | + sudo apt-get install -y lcov + lcov -c -b ${{github.workspace}}/include -d ${{github.workspace}}/build -o ${{github.workspace}}/coverage.info --no-external --gcov-tool ${{matrix.config.gcov-tool}} + bash <(curl -s https://codecov.io/bash) -f ${{github.workspace}}/coverage.info + if: ${{matrix.config.name == 'linux-x64-gcc-11-coverage'}} diff --git a/.gitignore b/.gitignore index df56dfa..f2e7452 100644 --- a/.gitignore +++ b/.gitignore @@ -80,88 +80,26 @@ environment_run.sh.env # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 # User-specific stuff -.idea/**/workspace.xml -.idea/**/tasks.xml -.idea/**/usage.statistics.xml -.idea/**/dictionaries -.idea/**/shelf - -# AWS User-specific -.idea/**/aws.xml - -# Generated files -.idea/**/contentModel.xml - -# Sensitive or high-churn files -.idea/**/dataSources/ -.idea/**/dataSources.ids -.idea/**/dataSources.local.xml -.idea/**/sqlDataSources.xml -.idea/**/dynamic.xml -.idea/**/uiDesigner.xml -.idea/**/dbnavigator.xml - -# Gradle -.idea/**/gradle.xml -.idea/**/libraries - -# Gradle and Maven with auto-import -# When using Gradle or Maven with auto-import, you should exclude module files, -# since they will be recreated, and may cause churn. Uncomment if using -# auto-import. 
-# .idea/artifacts -# .idea/compiler.xml -# .idea/jarRepositories.xml -# .idea/modules.xml -# .idea/*.iml -# .idea/modules -# *.iml -# *.ipr +.idea/ # CMake cmake-build-*/ -# Mongo Explorer plugin -.idea/**/mongoSettings.xml - # File-based project format *.iws # IntelliJ out/ -# mpeltonen/sbt-idea plugin -.idea_modules/ - # JIRA plugin atlassian-ide-plugin.xml -# Cursive Clojure plugin -.idea/replstate.xml - -# SonarLint plugin -.idea/sonarlint/ - # Crashlytics plugin (for Android Studio and IntelliJ) com_crashlytics_export_strings.xml crashlytics.properties crashlytics-build.properties fabric.properties -# Editor-based Rest Client -.idea/httpRequests - -# Android studio 3.1+ serialized cache file -.idea/caches/build_file_checksums.ser - -### Intellij+all Patch ### -# Ignore everything but code style settings and run configurations -# that are supposed to be shared within teams. - -.idea/* - -!.idea/codeStyles -!.idea/runConfigurations ### Ninja ### .ninja_deps diff --git a/CMakeLists.txt b/CMakeLists.txt index 151bc84..608694c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,8 +6,6 @@ project(dice-sparse-map include(cmake/boilerplate_init.cmake) boilerplate_init() -find_package(Boost REQUIRED) - add_library(${PROJECT_NAME} INTERFACE) # Use dice::sparse_map as target, more consistent with other libraries conventions (Boost, Qt, ...) add_library("${PROJECT_NAME}::${PROJECT_NAME}" ALIAS "${PROJECT_NAME}") @@ -15,10 +13,6 @@ add_library("${PROJECT_NAME}::${PROJECT_NAME}" ALIAS "${PROJECT_NAME}") target_include_directories(${PROJECT_NAME} INTERFACE "$") -target_link_libraries(${PROJECT_NAME} INTERFACE - Boost::headers - ) - if(MSVC) target_sources(${PROJECT_NAME} INTERFACE "$" @@ -30,4 +24,9 @@ if (IS_TOP_LEVEL) install_interface_library("${PROJECT_NAME}" "${PROJECT_NAME}" "${PROJECT_NAME}" "include") endif () - +if (BUILD_TESTING AND IS_TOP_LEVEL) + message("Building testing") + include(CTest) + enable_testing() + add_subdirectory(tests) +endif () diff --git a/README.md b/README.md index ea27c7b..e549187 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,6 @@ A **benchmark** of `dice::sparse_map::sparse_map` against other hash maps may be - Support for heterogeneous lookups allowing the usage of `find` with a type different than `Key` (e.g. if you have a map that uses `std::unique_ptr` as key, you can use a `foo*` or a `std::uintptr_t` as key parameter to `find` without constructing a `std::unique_ptr`, see [example](#heterogeneous-lookups)). - No need to reserve any sentinel value from the keys. - If the hash is known before a lookup, it is possible to pass it as parameter to speed-up the lookup (see `precalculated_hash` parameter in [API](https://tessil.github.io/sparse-map/classtsl_1_1sparse__map.html)). -- Support for efficient serialization and deserialization (see [example](#serialization) and the `serialize/deserialize` methods in the [API](https://tessil.github.io/sparse-map/classtsl_1_1sparse__map.html) for details). - Possibility to control the balance between insertion speed and memory usage with the `Sparsity` template parameter. A high sparsity means less memory but longer insertion times, and vice-versa for low sparsity. The default medium sparsity offers a good compromise (see [API](https://tessil.github.io/sparse-map/classtsl_1_1sparse__map.html#details) for details). For reference, with simple 64 bits integers as keys and values, a low sparsity offers ~15% faster insertions times but uses ~12% more memory. Nothing change regarding lookup speed. 
- API closely similar to `std::unordered_map` and `std::unordered_set`. @@ -47,8 +46,6 @@ The library relies heavily on the [popcount](https://en.wikipedia.org/wiki/Hammi With Clang and GCC, the library uses the `__builtin_popcount` function which will use the fast CPU instruction `POPCNT` when the library is compiled with `-mpopcnt`. Using the `POPCNT` instruction offers an improvement of ~15% to ~30% on lookups. So if you are compiling your code for a specific architecture that support the operation, don't forget the `-mpopcnt` (or `-march=native`) flag of your compiler. -On Windows with MSVC, the detection is done at runtime. - #### Move constructor Make sure that your key `Key` and potential value `T` have a `noexcept` move constructor. The library will work without it but insertions will be much slower if the copy constructor is expensive (the structure often needs to move some values around on insertion). @@ -102,23 +99,21 @@ If the project has been installed through `make install`, you can also use `find The code should work with any C++11 standard-compliant compiler and has been tested with GCC 4.8.4, Clang 3.5.0 and Visual Studio 2015. -To run the tests you will need the Boost Test library and CMake. +To run the tests you will need CMake and CTest. ```bash -git clone https://github.com/Tessil/sparse-map.git -cd sparse-map/tests +git clone https://github.com/dice-group/dice-sparse-map.git +cd sparse-map mkdir build cd build -cmake .. +cmake -DBUILD_TESTING=ON .. cmake --build . -./tsl_sparse_map_tests +ctest ``` ### Usage -The API can be found [here](https://tessil.github.io/sparse-map/). - -All methods are not documented yet, but they replicate the behaviour of the ones in `std::unordered_map` and `std::unordered_set`, except if specified otherwise. +Not all methods are documented yet, but they replicate the behaviour of the ones in `std::unordered_map` and `std::unordered_set`, except if specified otherwise. ### Example @@ -263,206 +258,6 @@ int main() { } ``` -#### Serialization - -The library provides an efficient way to serialize and deserialize a map or a set so that it can be saved to a file or send through the network. -To do so, it requires the user to provide a function object for both serialization and deserialization. - -```c++ -struct serializer { - // Must support the following types for U: std::uint64_t, float - // and std::pair if a map is used or Key for a set. - template - void operator()(const U& value); -}; -``` - -```c++ -struct deserializer { - // Must support the following types for U: std::uint64_t, float - // and std::pair if a map is used or Key for a set. - template - U operator()(); -}; -``` - -Note that the implementation leaves binary compatibility (endianness, float binary representation, size of int, ...) of the types it serializes/deserializes in the hands of the provided function objects if compatibility is required. - -More details regarding the `serialize` and `deserialize` methods can be found in the [API](https://tessil.github.io/sparse-map/classtsl_1_1sparse__map.html). 
- -```c++ -#include -#include -#include -#include -#include - - -class serializer { -public: - serializer(const char* file_name) { - m_ostream.exceptions(m_ostream.badbit | m_ostream.failbit); - m_ostream.open(file_name, std::ios::binary); - } - - template::value>::type* = nullptr> - void operator()(const T& value) { - m_ostream.write(reinterpret_cast(&value), sizeof(T)); - } - - void operator()(const std::pair& value) { - (*this)(value.first); - (*this)(value.second); - } - -private: - std::ofstream m_ostream; -}; - -class deserializer { -public: - deserializer(const char* file_name) { - m_istream.exceptions(m_istream.badbit | m_istream.failbit | m_istream.eofbit); - m_istream.open(file_name, std::ios::binary); - } - - template - T operator()() { - T value; - deserialize(value); - - return value; - } - -private: - template::value>::type* = nullptr> - void deserialize(T& value) { - m_istream.read(reinterpret_cast(&value), sizeof(T)); - } - - void deserialize(std::pair& value) { - deserialize(value.first); - deserialize(value.second); - } - -private: - std::ifstream m_istream; -}; - - -int main() { - const dice::sparse_map::sparse_map map = {{1, -1}, {2, -2}, {3, -3}, {4, -4}}; - - - const char* file_name = "sparse_map.data"; - { - serializer serial(file_name); - map.serialize(serial); - } - - { - deserializer dserial(file_name); - auto map_deserialized = dice::sparse_map::sparse_map::deserialize(dserial); - - assert(map == map_deserialized); - } - - { - deserializer dserial(file_name); - - /** - * If the serialized and deserialized map are hash compatibles (see conditions in API), - * setting the argument to true speed-up the deserialization process as we don't have - * to recalculate the hash of each key. We also know how much space each bucket needs. - */ - const bool hash_compatible = true; - auto map_deserialized = - dice::sparse_map::sparse_map::deserialize(dserial, hash_compatible); - - assert(map == map_deserialized); - } -} -``` - -##### Serialization with Boost Serialization and compression with zlib - -It's possible to use a serialization library to avoid the boilerplate. - -The following example uses Boost Serialization with the Boost zlib compression stream to reduce the size of the resulting serialized file. The example requires C++20 due to the usage of the template parameter list syntax in lambdas, but it can be adapted to less recent versions. 
- -```c++ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace boost { namespace serialization { - template - void serialize(Archive & ar, dice::sparse_map::sparse_map& map, const unsigned int version) { - split_free(ar, map, version); - } - - template - void save(Archive & ar, const dice::sparse_map::sparse_map& map, const unsigned int /*version*/) { - auto serializer = [&ar](const auto& v) { ar & v; }; - map.serialize(serializer); - } - - template - void load(Archive & ar, dice::sparse_map::sparse_map& map, const unsigned int /*version*/) { - auto deserializer = [&ar]() { U u; ar & u; return u; }; - map = dice::sparse_map::sparse_map::deserialize(deserializer); - } -}} - - -int main() { - dice::sparse_map::sparse_map map = {{1, -1}, {2, -2}, {3, -3}, {4, -4}}; - - - const char* file_name = "sparse_map.data"; - { - std::ofstream ofs; - ofs.exceptions(ofs.badbit | ofs.failbit); - ofs.open(file_name, std::ios::binary); - - boost::iostreams::filtering_ostream fo; - fo.push(boost::iostreams::zlib_compressor()); - fo.push(ofs); - - boost::archive::binary_oarchive oa(fo); - - oa << map; - } - - { - std::ifstream ifs; - ifs.exceptions(ifs.badbit | ifs.failbit | ifs.eofbit); - ifs.open(file_name, std::ios::binary); - - boost::iostreams::filtering_istream fi; - fi.push(boost::iostreams::zlib_decompressor()); - fi.push(ifs); - - boost::archive::binary_iarchive ia(fi); - - dice::sparse_map::sparse_map map_deserialized; - ia >> map_deserialized; - - assert(map == map_deserialized); - } -} -``` - ### License The code is licensed under the MIT license, see the LICENSE files ([1](LICENSE-tsl-sparse-map), [2](LICENSE-dice-sparse-map)) for details. diff --git a/doxygen.conf b/doxygen.conf index 115e07c..9e206f0 100644 --- a/doxygen.conf +++ b/doxygen.conf @@ -895,7 +895,7 @@ tsl::detail_popcount::* \ tsl::detail_sparse_hash::has_is_transparent* \ tsl::detail_sparse_hash::make_void* \ tsl::detail_sparse_hash::is_power_of_two_policy* \ -tsl::detail_sparse_hash::sparse_array* +tsl::detail_sparse_hash::sparse_array_type* # The EXAMPLE_PATH tag can be used to specify one or more files or directories # that contain example code fragments that are included (see the \include diff --git a/include/dice/sparse-map/boost_offset_pointer.hpp b/include/dice/sparse-map/boost_offset_pointer.hpp deleted file mode 100644 index 41b329d..0000000 --- a/include/dice/sparse-map/boost_offset_pointer.hpp +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef DICE_SPARSE_MAP_BOOST_OFFSET_POINTER_HPP -#define DICE_SPARSE_MAP_BOOST_OFFSET_POINTER_HPP - -#include "dice/sparse-map/sparse_hash.hpp" //needed, so the basic template is already included -#include - -namespace dice::sparse_map { -/* Template specialisation for a "const_cast" of a boost offset_ptr. 
- * @tparam PT PointedType - * @tparam DT DifferenceType - * @tparam OT OffsetType - * @tparam OA OffsetAlignment - */ -template -struct Remove_Const> { - template - static boost::interprocess::offset_ptr - remove(T const &const_iter) { - return boost::interprocess::const_pointer_cast(const_iter); - } -}; -} // namespace dice - -#endif // DICE_SPARSE_MAP_BOOST_OFFSET_POINTER_HPP diff --git a/include/dice/sparse-map/sparse_growth_policy.hpp b/include/dice/sparse-map/sparse_growth_policy.hpp deleted file mode 100644 index de69281..0000000 --- a/include/dice/sparse-map/sparse_growth_policy.hpp +++ /dev/null @@ -1,299 +0,0 @@ -/** - * MIT License - * - * Copyright (c) 2017 Thibaut Goetghebuer-Planchon - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef DICE_SPARSE_MAP_SPARSE_GROWTH_POLICY_HPP -#define DICE_SPARSE_MAP_SPARSE_GROWTH_POLICY_HPP - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace dice::sparse_map::sh { - -/** - * Grow the hash table by a factor of GrowthFactor keeping the bucket count to a - * power of two. It allows the table to use a mask operation instead of a modulo - * operation to map a hash to a bucket. - * - * GrowthFactor must be a power of two >= 2. - */ -template -class power_of_two_growth_policy { - public: - /** - * Called on the hash table creation and on rehash. The number of buckets for - * the table is passed in parameter. This number is a minimum, the policy may - * update this value with a higher value if needed (but not lower). - * - * If 0 is given, min_bucket_count_in_out must still be 0 after the policy - * creation and bucket_for_hash must always return 0 in this case. - */ - explicit power_of_two_growth_policy(std::size_t &min_bucket_count_in_out) { - if (min_bucket_count_in_out > max_bucket_count()) { - throw std::length_error("The hash table exceeds its maximum size."); - } - - if (min_bucket_count_in_out > 0) { - min_bucket_count_in_out = - round_up_to_power_of_two(min_bucket_count_in_out); - m_mask = min_bucket_count_in_out - 1; - } else { - m_mask = 0; - } - } - - /** - * Return the bucket [0, bucket_count()) to which the hash belongs. - * If bucket_count() is 0, it must always return 0. - */ - std::size_t bucket_for_hash(std::size_t hash) const noexcept { - return hash & m_mask; - } - - /** - * Return the number of buckets that should be used on next growth. 
- */ - std::size_t next_bucket_count() const { - if ((m_mask + 1) > max_bucket_count() / GrowthFactor) { - throw std::length_error("The hash table exceeds its maximum size."); - } - - return (m_mask + 1) * GrowthFactor; - } - - /** - * Return the maximum number of buckets supported by the policy. - */ - std::size_t max_bucket_count() const { - // Largest power of two. - return (std::numeric_limits::max() / 2) + 1; - } - - /** - * Reset the growth policy as if it was created with a bucket count of 0. - * After a clear, the policy must always return 0 when bucket_for_hash is - * called. - */ - void clear() noexcept { m_mask = 0; } - - private: - static std::size_t round_up_to_power_of_two(std::size_t value) { - if (is_power_of_two(value)) { - return value; - } - - if (value == 0) { - return 1; - } - - --value; - for (std::size_t i = 1; i < sizeof(std::size_t) * CHAR_BIT; i *= 2) { - value |= value >> i; - } - - return value + 1; - } - - static constexpr bool is_power_of_two(std::size_t value) { - return value != 0 && (value & (value - 1)) == 0; - } - - protected: - static_assert(is_power_of_two(GrowthFactor) && GrowthFactor >= 2, - "GrowthFactor must be a power of two >= 2."); - - std::size_t m_mask; -}; - -/** - * Grow the hash table by GrowthFactor::num / GrowthFactor::den and use a modulo - * to map a hash to a bucket. Slower but it can be useful if you want a slower - * growth. - */ -template > -class mod_growth_policy { - public: - explicit mod_growth_policy(std::size_t &min_bucket_count_in_out) { - if (min_bucket_count_in_out > max_bucket_count()) { - throw std::length_error("The hash table exceeds its maximum size."); - } - - if (min_bucket_count_in_out > 0) { - m_mod = min_bucket_count_in_out; - } else { - m_mod = 1; - } - } - - std::size_t bucket_for_hash(std::size_t hash) const noexcept { - return hash % m_mod; - } - - std::size_t next_bucket_count() const { - if (m_mod == max_bucket_count()) { - throw std::length_error("The hash table exceeds its maximum size."); - } - - const double next_bucket_count = - std::ceil(double(m_mod) * REHASH_SIZE_MULTIPLICATION_FACTOR); - if (!std::isnormal(next_bucket_count)) { - throw std::length_error("The hash table exceeds its maximum size."); - } - - if (next_bucket_count > double(max_bucket_count())) { - return max_bucket_count(); - } else { - return std::size_t(next_bucket_count); - } - } - - std::size_t max_bucket_count() const { return MAX_BUCKET_COUNT; } - - void clear() noexcept { m_mod = 1; } - - private: - static constexpr double REHASH_SIZE_MULTIPLICATION_FACTOR = - 1.0 * GrowthFactor::num / GrowthFactor::den; - static const std::size_t MAX_BUCKET_COUNT = - std::size_t(double(std::numeric_limits::max() / - REHASH_SIZE_MULTIPLICATION_FACTOR)); - - static_assert(REHASH_SIZE_MULTIPLICATION_FACTOR >= 1.1, - "Growth factor should be >= 1.1."); - - std::size_t m_mod; -}; - -/** - * Grow the hash table by using prime numbers as bucket count. Slower than - * dice::sh::power_of_two_growth_policy in general but will probably distribute - * the values around better in the buckets with a poor hash function. - * - * To allow the compiler to optimize the modulo operation, a lookup table is - * used with constant primes numbers. - * - * With a switch the code would look like: - * \code - * switch(iprime) { // iprime is the current prime of the hash table - * case 0: hash % 5ul; - * break; - * case 1: hash % 17ul; - * break; - * case 2: hash % 29ul; - * break; - * ... 
- * } - * \endcode - * - * Due to the constant variable in the modulo the compiler is able to optimize - * the operation by a series of multiplications, substractions and shifts. - * - * The 'hash % 5' could become something like 'hash - (hash * 0xCCCCCCCD) >> 34) - * * 5' in a 64 bits environment. - */ -class prime_growth_policy { - public: - explicit prime_growth_policy(std::size_t &min_bucket_count_in_out) { - auto it_prime = std::lower_bound(primes().begin(), primes().end(), - min_bucket_count_in_out); - if (it_prime == primes().end()) { - throw std::length_error("The hash table exceeds its maximum size."); - } - - m_iprime = - static_cast(std::distance(primes().begin(), it_prime)); - if (min_bucket_count_in_out > 0) { - min_bucket_count_in_out = *it_prime; - } else { - min_bucket_count_in_out = 0; - } - } - - std::size_t bucket_for_hash(std::size_t hash) const noexcept { - return mod_prime()[m_iprime](hash); - } - - std::size_t next_bucket_count() const { - if (m_iprime + 1 >= primes().size()) { - throw std::length_error("The hash table exceeds its maximum size."); - } - - return primes()[m_iprime + 1]; - } - - std::size_t max_bucket_count() const { return primes().back(); } - - void clear() noexcept { m_iprime = 0; } - - private: - static const std::array &primes() { - static const std::array PRIMES = { - {1ul, 5ul, 17ul, 29ul, 37ul, - 53ul, 67ul, 79ul, 97ul, 131ul, - 193ul, 257ul, 389ul, 521ul, 769ul, - 1031ul, 1543ul, 2053ul, 3079ul, 6151ul, - 12289ul, 24593ul, 49157ul, 98317ul, 196613ul, - 393241ul, 786433ul, 1572869ul, 3145739ul, 6291469ul, - 12582917ul, 25165843ul, 50331653ul, 100663319ul, 201326611ul, - 402653189ul, 805306457ul, 1610612741ul, 3221225473ul, 4294967291ul}}; - - static_assert( - std::numeric_limits::max() >= PRIMES.size(), - "The type of m_iprime is not big enough."); - - return PRIMES; - } - - static const std::array &mod_prime() { - // MOD_PRIME[iprime](hash) returns hash % PRIMES[iprime]. This table allows - // for faster modulo as the compiler can optimize the modulo code better - // with a constant known at the compilation. 
- static const std::array MOD_PRIME = { - {&mod<0>, &mod<1>, &mod<2>, &mod<3>, &mod<4>, &mod<5>, &mod<6>, - &mod<7>, &mod<8>, &mod<9>, &mod<10>, &mod<11>, &mod<12>, &mod<13>, - &mod<14>, &mod<15>, &mod<16>, &mod<17>, &mod<18>, &mod<19>, &mod<20>, - &mod<21>, &mod<22>, &mod<23>, &mod<24>, &mod<25>, &mod<26>, &mod<27>, - &mod<28>, &mod<29>, &mod<30>, &mod<31>, &mod<32>, &mod<33>, &mod<34>, - &mod<35>, &mod<36>, &mod<37>, &mod<38>, &mod<39>}}; - - return MOD_PRIME; - } - - template - static std::size_t mod(std::size_t hash) { - return hash % primes()[IPrime]; - } - - private: - unsigned int m_iprime; -}; - -} // namespace dice - -#endif diff --git a/include/dice/sparse-map/sparse_hash.hpp b/include/dice/sparse-map/sparse_hash.hpp deleted file mode 100644 index b966305..0000000 --- a/include/dice/sparse-map/sparse_hash.hpp +++ /dev/null @@ -1,2307 +0,0 @@ -/** - * MIT License - * - * Copyright (c) 2017 Thibaut Goetghebuer-Planchon - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef DICE_SPARSE_MAP_SPARSE_HASH_HPP -#define DICE_SPARSE_MAP_SPARSE_HASH_HPP - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "dice/sparse-map/sparse_growth_policy.hpp" -#include "boost/container/vector.hpp" - -#ifdef __INTEL_COMPILER -#include // For _popcnt32 and _popcnt64 -#endif - -#ifdef _MSC_VER -#include // For __cpuid, __popcnt and __popcnt64 -#endif - -#ifdef TSL_DEBUG -#define tsl_sh_assert(expr) assert(expr) -#else -#define tsl_sh_assert(expr) (static_cast(0)) -#endif - -namespace dice::sparse_map { - -namespace sh { -enum class probing { linear, quadratic }; - -enum class exception_safety { basic, strong }; - -enum class sparsity { high, medium, low }; -} // namespace sh - -namespace detail_popcount { -/** - * Define the popcount(ll) methods and pick-up the best depending on the - * compiler. - */ - -// From Wikipedia: https://en.wikipedia.org/wiki/Hamming_weight -inline int fallback_popcountll(unsigned long long int x) { - static_assert( - sizeof(unsigned long long int) == sizeof(std::uint64_t), - "sizeof(unsigned long long int) must be equal to sizeof(std::uint64_t). 
" - "Open a feature request if you need support for a platform where it " - "isn't the case."); - - const std::uint64_t m1 = 0x5555555555555555ull; - const std::uint64_t m2 = 0x3333333333333333ull; - const std::uint64_t m4 = 0x0f0f0f0f0f0f0f0full; - const std::uint64_t h01 = 0x0101010101010101ull; - - x -= (x >> 1ull) & m1; - x = (x & m2) + ((x >> 2ull) & m2); - x = (x + (x >> 4ull)) & m4; - return static_cast((x * h01) >> (64ull - 8ull)); -} - -inline int fallback_popcount(unsigned int x) { - static_assert(sizeof(unsigned int) == sizeof(std::uint32_t) || - sizeof(unsigned int) == sizeof(std::uint64_t), - "sizeof(unsigned int) must be equal to sizeof(std::uint32_t) " - "or sizeof(std::uint64_t). " - "Open a feature request if you need support for a platform " - "where it isn't the case."); - - if (sizeof(unsigned int) == sizeof(std::uint32_t)) { - const std::uint32_t m1 = 0x55555555; - const std::uint32_t m2 = 0x33333333; - const std::uint32_t m4 = 0x0f0f0f0f; - const std::uint32_t h01 = 0x01010101; - - x -= (x >> 1) & m1; - x = (x & m2) + ((x >> 2) & m2); - x = (x + (x >> 4)) & m4; - return static_cast((x * h01) >> (32 - 8)); - } else { - return fallback_popcountll(x); - } -} - -#if defined(__clang__) || defined(__GNUC__) -inline int popcountll(unsigned long long int value) { - return __builtin_popcountll(value); -} - -inline int popcount(unsigned int value) { return __builtin_popcount(value); } - -#elif defined(_MSC_VER) -/** - * We need to check for popcount support at runtime on Windows with __cpuid - * See https://msdn.microsoft.com/en-us/library/bb385231.aspx - */ -inline bool has_popcount_support() { - int cpu_infos[4]; - __cpuid(cpu_infos, 1); - return (cpu_infos[2] & (1 << 23)) != 0; -} - -inline int popcountll(unsigned long long int value) { -#ifdef _WIN64 - static_assert( - sizeof(unsigned long long int) == sizeof(std::int64_t), - "sizeof(unsigned long long int) must be equal to sizeof(std::int64_t). "); - - static const bool has_popcount = has_popcount_support(); - return has_popcount - ? static_cast(__popcnt64(static_cast(value))) - : fallback_popcountll(value); -#else - return fallback_popcountll(value); -#endif -} - -inline int popcount(unsigned int value) { - static_assert(sizeof(unsigned int) == sizeof(std::int32_t), - "sizeof(unsigned int) must be equal to sizeof(std::int32_t). "); - - static const bool has_popcount = has_popcount_support(); - return has_popcount - ? static_cast(__popcnt(static_cast(value))) - : fallback_popcount(value); -} - -#elif defined(__INTEL_COMPILER) -inline int popcountll(unsigned long long int value) { - static_assert(sizeof(unsigned long long int) == sizeof(__int64), ""); - return _popcnt64(static_cast<__int64>(value)); -} - -inline int popcount(unsigned int value) { - return _popcnt32(static_cast(value)); -} - -#else -inline int popcountll(unsigned long long int x) { - return fallback_popcountll(x); -} - -inline int popcount(unsigned int x) { return fallback_popcount(x); } - -#endif -} // namespace detail_popcount - - -/* Replacement for const_cast in sparse_array. - * Can be overloaded for specific fancy pointers - * (see: include/dice/boost_offset_pointer.h). - * This is just a workaround. - * The clean way would be to change the implementation to stop using const_cast. - */ - template - struct Remove_Const { - template - static T remove(V iter) { - return const_cast(iter); - } - }; - -namespace detail_sparse_hash { - /* to_address can convert any raw or fancy pointer into a raw pointer. 
- * It is needed for the allocator construct and destroy calls. - * This specific implementation is based on boost 1.71.0. - */ -#if __cplusplus >= 201400L // with 14-features - template - T *to_address(T *v) noexcept { return v; } - - namespace fancy_ptr_detail { - template - inline T *ptr_address(T *v, int) noexcept { return v; } - - template - inline auto ptr_address(const T &v, int) noexcept - -> decltype(std::pointer_traits::to_address(v)) { - return std::pointer_traits::to_address(v); - } - template - inline auto ptr_address(const T &v, long) noexcept { - return fancy_ptr_detail::ptr_address(v.operator->(), 0); - } - } // namespace detail - - template inline auto to_address(const T &v) noexcept { - return fancy_ptr_detail::ptr_address(v, 0); - } -#else // without 14-features - template - inline T *to_address(T *v) noexcept { return v; } - - template - inline typename std::pointer_traits::element_type * to_address(const T &v) noexcept { - return detail_sparse_hash::to_address(v.operator->()); - } -#endif - - -template -struct make_void { - using type = void; -}; - -template -struct has_is_transparent : std::false_type {}; - -template -struct has_is_transparent::type> - : std::true_type {}; - -template -struct is_power_of_two_policy : std::false_type {}; - -template -struct is_power_of_two_policy> - : std::true_type {}; - -inline constexpr bool is_power_of_two(std::size_t value) { - return value != 0 && (value & (value - 1)) == 0; -} - -inline std::size_t round_up_to_power_of_two(std::size_t value) { - if (is_power_of_two(value)) { - return value; - } - - if (value == 0) { - return 1; - } - - --value; - for (std::size_t i = 1; i < sizeof(std::size_t) * CHAR_BIT; i *= 2) { - value |= value >> i; - } - - return value + 1; -} - -template -static T numeric_cast(U value, - const char *error_message = "numeric_cast() failed.") { - T ret = static_cast(value); - if (static_cast(ret) != value) { - throw std::runtime_error(error_message); - } - - const bool is_same_signedness = - (std::is_unsigned::value && std::is_unsigned::value) || - (std::is_signed::value && std::is_signed::value); - if (!is_same_signedness && (ret < T{}) != (value < U{})) { - throw std::runtime_error(error_message); - } - - return ret; -} - -/** - * Fixed size type used to represent size_type values on serialization. Need to - * be big enough to represent a std::size_t on 32 and 64 bits platforms, and - * must be the same size on both platforms. - */ -using slz_size_type = std::uint64_t; -static_assert(std::numeric_limits::max() >= - std::numeric_limits::max(), - "slz_size_type must be >= std::size_t"); - -template -static T deserialize_value(Deserializer &deserializer) { - // MSVC < 2017 is not conformant, circumvent the problem by removing the - // template keyword -#if defined(_MSC_VER) && _MSC_VER < 1910 - return deserializer.Deserializer::operator()(); -#else - return deserializer.Deserializer::template operator()(); -#endif -} - -/** - * WARNING: the sparse_array class doesn't free the ressources allocated through - * the allocator passed in parameter in each method. You have to manually call - * `clear(Allocator&)` when you don't need a sparse_array object anymore. - * - * The reason is that the sparse_array doesn't store the allocator to avoid - * wasting space in each sparse_array when the allocator has a size > 0. It only - * allocates/deallocates objects with the allocator that is passed in parameter. - * - * - * - * Index denotes a value between [0, BITMAP_NB_BITS), it is an index similar to - * std::vector. 
Offset denotes the real position in `m_values` corresponding to - * an index. - * - * We are using raw pointers instead of std::vector to avoid loosing - * 2*sizeof(size_t) bytes to store the capacity and size of the vector in each - * sparse_array. We know we can only store up to BITMAP_NB_BITS elements in the - * array, we don't need such big types. - * - * - * T must be nothrow move constructible and/or copy constructible. - * Behaviour is undefined if the destructor of T throws an exception. - * - * See https://smerity.com/articles/2015/google_sparsehash.html for details on - * the idea behinds the implementation. - * - * TODO Check to use std::realloc and std::memmove when possible - */ -template -class sparse_array { - public: - using value_type = T; - using size_type = std::uint_least8_t; - using allocator_type = Allocator; - using allocator_traits = std::allocator_traits; - using pointer = typename allocator_traits::pointer; - using const_pointer = typename allocator_traits::const_pointer; - using iterator = pointer; - using const_iterator = const_pointer; - - private: - static const size_type CAPACITY_GROWTH_STEP = - (Sparsity == dice::sparse_map::sh::sparsity::high) ? 2 - : (Sparsity == dice::sparse_map::sh::sparsity::medium) - ? 4 - : 8; // (Sparsity == dice::sh::sparsity::low) - - /** - * Bitmap size configuration. - * Use 32 bits for the bitmap on 32-bits or less environnement as popcount on - * 64 bits numbers is slow on these environnement. Use 64 bits bitmap - * otherwise. - */ -#if SIZE_MAX <= UINT32_MAX - using bitmap_type = std::uint_least32_t; - static const std::size_t BITMAP_NB_BITS = 32; - static const std::size_t BUCKET_SHIFT = 5; -#else - using bitmap_type = std::uint_least64_t; - static const std::size_t BITMAP_NB_BITS = 64; - static const std::size_t BUCKET_SHIFT = 6; -#endif - - static const std::size_t BUCKET_MASK = BITMAP_NB_BITS - 1; - - static_assert(is_power_of_two(BITMAP_NB_BITS), - "BITMAP_NB_BITS must be a power of two."); - static_assert(std::numeric_limits::digits >= BITMAP_NB_BITS, - "bitmap_type must be able to hold at least BITMAP_NB_BITS."); - static_assert((std::size_t(1) << BUCKET_SHIFT) == BITMAP_NB_BITS, - "(1 << BUCKET_SHIFT) must be equal to BITMAP_NB_BITS."); - static_assert(std::numeric_limits::max() >= BITMAP_NB_BITS, - "size_type must be big enough to hold BITMAP_NB_BITS."); - static_assert(std::is_unsigned::value, - "bitmap_type must be unsigned."); - static_assert((std::numeric_limits::max() & BUCKET_MASK) == - BITMAP_NB_BITS - 1, - ""); - - public: - /** - * Map an ibucket [0, bucket_count) in the hash table to a sparse_ibucket - * (a sparse_array holds multiple buckets, so there is less sparse_array than - * bucket_count). - * - * The bucket ibucket is in - * m_sparse_buckets[sparse_ibucket(ibucket)][index_in_sparse_bucket(ibucket)] - * instead of something like m_buckets[ibucket] in a classical hash table. - */ - static std::size_t sparse_ibucket(std::size_t ibucket) { - return ibucket >> BUCKET_SHIFT; - } - - /** - * Map an ibucket [0, bucket_count) in the hash table to an index in the - * sparse_array which corresponds to the bucket. - * - * The bucket ibucket is in - * m_sparse_buckets[sparse_ibucket(ibucket)][index_in_sparse_bucket(ibucket)] - * instead of something like m_buckets[ibucket] in a classical hash table. 
- */ - static typename sparse_array::size_type index_in_sparse_bucket( - std::size_t ibucket) { - return static_cast( - ibucket & sparse_array::BUCKET_MASK); - } - - static std::size_t nb_sparse_buckets(std::size_t bucket_count) noexcept { - if (bucket_count == 0) { - return 0; - } - - return std::max( - 1, sparse_ibucket(dice::sparse_map::detail_sparse_hash::round_up_to_power_of_two( - bucket_count))); - } - - public: - sparse_array() noexcept - : m_values(nullptr), - m_bitmap_vals(0), - m_bitmap_deleted_vals(0), - m_nb_elements(0), - m_capacity(0), - m_last_array(false) {} - - //needed for "is_constructible" with no parameters - sparse_array(std::allocator_arg_t, Allocator const&) noexcept : sparse_array() {} - - explicit sparse_array(bool last_bucket) noexcept - : m_values(nullptr), - m_bitmap_vals(0), - m_bitmap_deleted_vals(0), - m_nb_elements(0), - m_capacity(0), - m_last_array(last_bucket) {} - - //const Allocator needed for MoveInsertable requirement - sparse_array(size_type capacity, Allocator const &const_alloc) - : m_values(nullptr), - m_bitmap_vals(0), - m_bitmap_deleted_vals(0), - m_nb_elements(0), - m_capacity(capacity), - m_last_array(false) { - if (m_capacity > 0) { - auto alloc = const_cast(const_alloc); - m_values = alloc.allocate(m_capacity); - tsl_sh_assert(m_values != - nullptr); // allocate should throw if there is a failure - } - } - - //const Allocator needed for MoveInsertable requirement - sparse_array(const sparse_array &other, Allocator const &const_alloc) - : m_values(nullptr), - m_bitmap_vals(other.m_bitmap_vals), - m_bitmap_deleted_vals(other.m_bitmap_deleted_vals), - m_nb_elements(0), - m_capacity(other.m_capacity), - m_last_array(other.m_last_array) { - tsl_sh_assert(other.m_capacity >= other.m_nb_elements); - if (m_capacity == 0) { - return; - } - - auto alloc = const_cast(const_alloc); - m_values = alloc.allocate(m_capacity); - tsl_sh_assert(m_values != - nullptr); // allocate should throw if there is a failure - try { - for (size_type i = 0; i < other.m_nb_elements; i++) { - construct_value(alloc, m_values + i, other.m_values[i]); - m_nb_elements++; - } - } catch (...) { - clear(alloc); - throw; - } - } - - sparse_array(sparse_array &&other) noexcept - : m_values(other.m_values), - m_bitmap_vals(other.m_bitmap_vals), - m_bitmap_deleted_vals(other.m_bitmap_deleted_vals), - m_nb_elements(other.m_nb_elements), - m_capacity(other.m_capacity), - m_last_array(other.m_last_array) { - other.m_values = nullptr; - other.m_bitmap_vals = 0; - other.m_bitmap_deleted_vals = 0; - other.m_nb_elements = 0; - other.m_capacity = 0; - } - - //const Allocator needed for MoveInsertable requirement - sparse_array(sparse_array &&other, Allocator const &const_alloc) - : m_values(nullptr), - m_bitmap_vals(other.m_bitmap_vals), - m_bitmap_deleted_vals(other.m_bitmap_deleted_vals), - m_nb_elements(0), - m_capacity(other.m_capacity), - m_last_array(other.m_last_array) { - tsl_sh_assert(other.m_capacity >= other.m_nb_elements); - if (m_capacity == 0) { - return; - } - - auto alloc = const_cast(const_alloc); - m_values = alloc.allocate(m_capacity); - tsl_sh_assert(m_values != - nullptr); // allocate should throw if there is a failure - try { - for (size_type i = 0; i < other.m_nb_elements; i++) { - construct_value(alloc, m_values + i, std::move(other.m_values[i])); - m_nb_elements++; - } - } catch (...) 
{ - clear(alloc); - throw; - } - } - - sparse_array &operator=(const sparse_array &) = delete; - sparse_array &operator=(sparse_array &&other) noexcept{ - this->m_values = other.m_values; - this->m_bitmap_vals = other.m_bitmap_vals; - this->m_bitmap_deleted_vals = other.m_bitmap_deleted_vals; - this->m_nb_elements = other.m_nb_elements; - this->m_capacity = other.m_capacity; - other.m_values = nullptr; - other.m_bitmap_vals = 0; - other.m_bitmap_deleted_vals = 0; - other.m_nb_elements = 0; - other.m_capacity = 0; - return *this; - } - - - - ~sparse_array() noexcept { - // The code that manages the sparse_array must have called clear before - // destruction. See documentation of sparse_array for more details. - tsl_sh_assert(m_capacity == 0 && m_nb_elements == 0 && m_values == nullptr); - } - - iterator begin() noexcept { return m_values; } - iterator end() noexcept { return m_values + m_nb_elements; } - const_iterator begin() const noexcept { return cbegin(); } - const_iterator end() const noexcept { return cend(); } - const_iterator cbegin() const noexcept { return m_values; } - const_iterator cend() const noexcept { return m_values + m_nb_elements; } - - bool empty() const noexcept { return m_nb_elements == 0; } - - size_type size() const noexcept { return m_nb_elements; } - - void clear(allocator_type &alloc) noexcept { - destroy_and_deallocate_values(alloc, m_values, m_nb_elements, m_capacity); - - m_values = nullptr; - m_bitmap_vals = 0; - m_bitmap_deleted_vals = 0; - m_nb_elements = 0; - m_capacity = 0; - } - - bool last() const noexcept { return m_last_array; } - - void set_as_last() noexcept { m_last_array = true; } - - bool has_value(size_type index) const noexcept { - tsl_sh_assert(index < BITMAP_NB_BITS); - return (m_bitmap_vals & (bitmap_type(1) << index)) != 0; - } - - bool has_deleted_value(size_type index) const noexcept { - tsl_sh_assert(index < BITMAP_NB_BITS); - return (m_bitmap_deleted_vals & (bitmap_type(1) << index)) != 0; - } - - iterator value(size_type index) noexcept { - tsl_sh_assert(has_value(index)); - return m_values + index_to_offset(index); - } - - const_iterator value(size_type index) const noexcept { - tsl_sh_assert(has_value(index)); - return m_values + index_to_offset(index); - } - - /** - * Return iterator to set value. 
- */ - template - iterator set(allocator_type &alloc, size_type index, Args &&...value_args) { - tsl_sh_assert(!has_value(index)); - - const size_type offset = index_to_offset(index); - insert_at_offset(alloc, offset, std::forward(value_args)...); - - m_bitmap_vals = (m_bitmap_vals | (bitmap_type(1) << index)); - m_bitmap_deleted_vals = - (m_bitmap_deleted_vals & ~(bitmap_type(1) << index)); - - m_nb_elements++; - - tsl_sh_assert(has_value(index)); - tsl_sh_assert(!has_deleted_value(index)); - - return m_values + offset; - } - - iterator erase(allocator_type &alloc, iterator position) { - const size_type offset = - static_cast(std::distance(begin(), position)); - return erase(alloc, position, offset_to_index(offset)); - } - - // Return the next value or end if no next value - iterator erase(allocator_type &alloc, iterator position, size_type index) { - tsl_sh_assert(has_value(index)); - tsl_sh_assert(!has_deleted_value(index)); - - const size_type offset = - static_cast(std::distance(begin(), position)); - erase_at_offset(alloc, offset); - - m_bitmap_vals = (m_bitmap_vals & ~(bitmap_type(1) << index)); - m_bitmap_deleted_vals = (m_bitmap_deleted_vals | (bitmap_type(1) << index)); - - m_nb_elements--; - - tsl_sh_assert(!has_value(index)); - tsl_sh_assert(has_deleted_value(index)); - - return m_values + offset; - } - - void swap(sparse_array &other) { - using std::swap; - - swap(m_values, other.m_values); - swap(m_bitmap_vals, other.m_bitmap_vals); - swap(m_bitmap_deleted_vals, other.m_bitmap_deleted_vals); - swap(m_nb_elements, other.m_nb_elements); - swap(m_capacity, other.m_capacity); - swap(m_last_array, other.m_last_array); - } - - static iterator mutable_iterator(const_iterator pos) { - return ::dice::sparse_map::Remove_Const::template remove(pos); - } - - template - void serialize(Serializer &serializer) const { - const slz_size_type sparse_bucket_size = m_nb_elements; - serializer(sparse_bucket_size); - - const slz_size_type bitmap_vals = m_bitmap_vals; - serializer(bitmap_vals); - - const slz_size_type bitmap_deleted_vals = m_bitmap_deleted_vals; - serializer(bitmap_deleted_vals); - - for (const value_type &value : *this) { - serializer(value); - } - } - - template - static sparse_array deserialize_hash_compatible(Deserializer &deserializer, - Allocator &alloc) { - const slz_size_type sparse_bucket_size = - deserialize_value(deserializer); - const slz_size_type bitmap_vals = - deserialize_value(deserializer); - const slz_size_type bitmap_deleted_vals = - deserialize_value(deserializer); - - if (sparse_bucket_size > BITMAP_NB_BITS) { - throw std::runtime_error( - "Deserialized sparse_bucket_size is too big for the platform. " - "Maximum should be BITMAP_NB_BITS."); - } - - sparse_array sarray; - if (sparse_bucket_size == 0) { - return sarray; - } - - sarray.m_bitmap_vals = numeric_cast( - bitmap_vals, "Deserialized bitmap_vals is too big."); - sarray.m_bitmap_deleted_vals = numeric_cast( - bitmap_deleted_vals, "Deserialized bitmap_deleted_vals is too big."); - - sarray.m_capacity = numeric_cast( - sparse_bucket_size, "Deserialized sparse_bucket_size is too big."); - sarray.m_values = alloc.allocate(sarray.m_capacity); - - try { - for (size_type ivalue = 0; ivalue < sarray.m_capacity; ivalue++) { - construct_value(alloc, sarray.m_values + ivalue, - deserialize_value(deserializer)); - sarray.m_nb_elements++; - } - } catch (...) 
{ - sarray.clear(alloc); - throw; - } - - return sarray; - } - - /** - * Deserialize the values of the bucket and insert them all in sparse_hash - * through sparse_hash.insert(...). - */ - template - static void deserialize_values_into_sparse_hash(Deserializer &deserializer, - SparseHash &sparse_hash) { - const slz_size_type sparse_bucket_size = - deserialize_value(deserializer); - - const slz_size_type bitmap_vals = - deserialize_value(deserializer); - static_cast(bitmap_vals); // Ignore, not needed - - const slz_size_type bitmap_deleted_vals = - deserialize_value(deserializer); - static_cast(bitmap_deleted_vals); // Ignore, not needed - - for (slz_size_type ivalue = 0; ivalue < sparse_bucket_size; ivalue++) { - sparse_hash.insert(deserialize_value(deserializer)); - } - } - - private: - template - static void construct_value(allocator_type &alloc, pointer value, - Args &&... value_args) { - std::allocator_traits::construct( - alloc, detail_sparse_hash::to_address(value), std::forward(value_args)...); - } - - static void destroy_value(allocator_type &alloc, pointer value) noexcept { - std::allocator_traits::destroy(alloc, detail_sparse_hash::to_address(value)); - } - - static void destroy_and_deallocate_values( - allocator_type &alloc, pointer values, size_type nb_values, - size_type capacity_values) noexcept { - for (size_type i = 0; i < nb_values; i++) { - destroy_value(alloc, values + i); - } - - alloc.deallocate(values, capacity_values); - } - - static size_type popcount(bitmap_type val) noexcept { - if (sizeof(bitmap_type) <= sizeof(unsigned int)) { - return static_cast( - dice::sparse_map::detail_popcount::popcount(static_cast(val))); - } else { - return static_cast(dice::sparse_map::detail_popcount::popcountll(val)); - } - } - - size_type index_to_offset(size_type index) const noexcept { - tsl_sh_assert(index < BITMAP_NB_BITS); - return popcount(m_bitmap_vals & - ((bitmap_type(1) << index) - bitmap_type(1))); - } - - // TODO optimize - size_type offset_to_index(size_type offset) const noexcept { - tsl_sh_assert(offset < m_nb_elements); - - bitmap_type bitmap_vals = m_bitmap_vals; - size_type index = 0; - size_type nb_ones = 0; - - while (bitmap_vals != 0) { - if ((bitmap_vals & 0x1) == 1) { - if (nb_ones == offset) { - break; - } - - nb_ones++; - } - - index++; - bitmap_vals = bitmap_vals >> 1; - } - - return index; - } - - size_type next_capacity() const noexcept { - return static_cast(m_capacity + CAPACITY_GROWTH_STEP); - } - - /** - * Insertion - * - * Two situations: - * - Either we are in a situation where - * std::is_nothrow_move_constructible::value is true. In this - * case, on insertion we just reallocate m_values when we reach its capacity - * (i.e. m_nb_elements == m_capacity), otherwise we just put the new value at - * its appropriate place. We can easily keep the strong exception guarantee as - * moving the values around is safe. - * - Otherwise we are in a situation where - * std::is_nothrow_move_constructible::value is false. In this - * case on EACH insertion we allocate a new area of m_nb_elements + 1 where we - * copy the values of m_values into it and put the new value there. On - * success, we set m_values to this new area. Even if slower, it's the only - * way to preserve to strong exception guarantee. 
- */ - template ::value>::type * = nullptr> - void insert_at_offset(allocator_type &alloc, size_type offset, - Args &&...value_args) { - if (m_nb_elements < m_capacity) { - insert_at_offset_no_realloc(alloc, offset, - std::forward(value_args)...); - } else { - insert_at_offset_realloc(alloc, offset, next_capacity(), - std::forward(value_args)...); - } - } - - template ::value>::type * = nullptr> - void insert_at_offset(allocator_type &alloc, size_type offset, - Args &&...value_args) { - insert_at_offset_realloc(alloc, offset, m_nb_elements + 1, - std::forward(value_args)...); - } - - template ::value>::type * = nullptr> - void insert_at_offset_no_realloc(allocator_type &alloc, size_type offset, - Args &&...value_args) { - tsl_sh_assert(offset <= m_nb_elements); - tsl_sh_assert(m_nb_elements < m_capacity); - - for (size_type i = m_nb_elements; i > offset; i--) { - construct_value(alloc, m_values + i, std::move(m_values[i - 1])); - destroy_value(alloc, m_values + i - 1); - } - - try { - construct_value(alloc, m_values + offset, - std::forward(value_args)...); - } catch (...) { - for (size_type i = offset; i < m_nb_elements; i++) { - construct_value(alloc, m_values + i, std::move(m_values[i + 1])); - destroy_value(alloc, m_values + i + 1); - } - throw; - } - } - - template ::value>::type * = nullptr> - void insert_at_offset_realloc(allocator_type &alloc, size_type offset, - size_type new_capacity, Args &&...value_args) { - tsl_sh_assert(new_capacity > m_nb_elements); - - pointer new_values = alloc.allocate(new_capacity); - // Allocate should throw if there is a failure - tsl_sh_assert(new_values != nullptr); - - try { - construct_value(alloc, new_values + offset, - std::forward(value_args)...); - } catch (...) { - alloc.deallocate(new_values, new_capacity); - throw; - } - - // Should not throw from here - for (size_type i = 0; i < offset; i++) { - construct_value(alloc, new_values + i, std::move(m_values[i])); - } - - for (size_type i = offset; i < m_nb_elements; i++) { - construct_value(alloc, new_values + i + 1, std::move(m_values[i])); - } - - destroy_and_deallocate_values(alloc, m_values, m_nb_elements, m_capacity); - - m_values = new_values; - m_capacity = new_capacity; - } - - template ::value>::type * = nullptr> - void insert_at_offset_realloc(allocator_type &alloc, size_type offset, - size_type new_capacity, Args &&...value_args) { - tsl_sh_assert(new_capacity > m_nb_elements); - - value_type *new_values = alloc.allocate(new_capacity); - // Allocate should throw if there is a failure - tsl_sh_assert(new_values != nullptr); - - size_type nb_new_values = 0; - try { - for (size_type i = 0; i < offset; i++) { - construct_value(alloc, new_values + i, m_values[i]); - nb_new_values++; - } - - construct_value(alloc, new_values + offset, - std::forward(value_args)...); - nb_new_values++; - - for (size_type i = offset; i < m_nb_elements; i++) { - construct_value(alloc, new_values + i + 1, m_values[i]); - nb_new_values++; - } - } catch (...) { - destroy_and_deallocate_values(alloc, new_values, nb_new_values, - new_capacity); - throw; - } - - tsl_sh_assert(nb_new_values == m_nb_elements + 1); - - destroy_and_deallocate_values(alloc, m_values, m_nb_elements, m_capacity); - - m_values = new_values; - m_capacity = new_capacity; - } - - /** - * Erasure - * - * Two situations: - * - Either we are in a situation where - * std::is_nothrow_move_constructible::value is true. Simply - * destroy the value and left-shift move the value on the right of offset. 
- * - Otherwise we are in a situation where - * std::is_nothrow_move_constructible::value is false. Copy all - * the values except the one at offset into a new heap area. On success, we - * set m_values to this new area. Even if slower, it's the only way to - * preserve to strong exception guarantee. - */ - template ::value>::type * = nullptr> - void erase_at_offset(allocator_type &alloc, size_type offset) noexcept { - tsl_sh_assert(offset < m_nb_elements); - - destroy_value(alloc, m_values + offset); - - for (size_type i = offset + 1; i < m_nb_elements; i++) { - construct_value(alloc, m_values + i - 1, std::move(m_values[i])); - destroy_value(alloc, m_values + i); - } - } - - template ::value>::type * = nullptr> - void erase_at_offset(allocator_type &alloc, size_type offset) { - tsl_sh_assert(offset < m_nb_elements); - - // Erasing the last element, don't need to reallocate. We keep the capacity. - if (offset + 1 == m_nb_elements) { - destroy_value(alloc, m_values + offset); - return; - } - - tsl_sh_assert(m_nb_elements > 1); - const size_type new_capacity = m_nb_elements - 1; - - value_type *new_values = alloc.allocate(new_capacity); - // Allocate should throw if there is a failure - tsl_sh_assert(new_values != nullptr); - - size_type nb_new_values = 0; - try { - for (size_type i = 0; i < m_nb_elements; i++) { - if (i != offset) { - construct_value(alloc, new_values + nb_new_values, m_values[i]); - nb_new_values++; - } - } - } catch (...) { - destroy_and_deallocate_values(alloc, new_values, nb_new_values, - new_capacity); - throw; - } - - tsl_sh_assert(nb_new_values == m_nb_elements - 1); - - destroy_and_deallocate_values(alloc, m_values, m_nb_elements, m_capacity); - - m_values = new_values; - m_capacity = new_capacity; - } - - private: - pointer m_values; - - bitmap_type m_bitmap_vals; - bitmap_type m_bitmap_deleted_vals; - - size_type m_nb_elements; - size_type m_capacity; - bool m_last_array; -}; - -/** - * Internal common class used by `sparse_map` and `sparse_set`. - * - * `ValueType` is what will be stored by `sparse_hash` (usually `std::pair` for map and `Key` for set). - * - * `KeySelect` should be a `FunctionObject` which takes a `ValueType` in - * parameter and returns a reference to the key. - * - * `ValueSelect` should be a `FunctionObject` which takes a `ValueType` in - * parameter and returns a reference to the value. `ValueSelect` should be void - * if there is no value (in a set for example). - * - * The strong exception guarantee only holds if `ExceptionSafety` is set to - * `dice::sh::exception_safety::strong`. - * - * `ValueType` must be nothrow move constructible and/or copy constructible. - * Behaviour is undefined if the destructor of `ValueType` throws. - * - * - * The class holds its buckets in a 2-dimensional fashion. Instead of having a - * linear `std::vector` for [0, bucket_count) where each bucket stores - * one value, we have a `std::vector` (m_sparse_buckets_data) - * where each `sparse_array` stores multiple values (up to - * `sparse_array::BITMAP_NB_BITS`). To convert a one dimensional `ibucket` - * position to a position in `std::vector` and a position in - * `sparse_array`, use respectively the methods - * `sparse_array::sparse_ibucket(ibucket)` and - * `sparse_array::index_in_sparse_bucket(ibucket)`. 
- */ -template -class sparse_hash : private Allocator, - private Hash, - private KeyEqual, - private GrowthPolicy { - private: - template - using has_mapped_type = - typename std::integral_constant::value>; - - static_assert( - noexcept(std::declval().bucket_for_hash(std::size_t(0))), - "GrowthPolicy::bucket_for_hash must be noexcept."); - static_assert(noexcept(std::declval().clear()), - "GrowthPolicy::clear must be noexcept."); - - public: - template - class sparse_iterator; - - using key_type = typename KeySelect::key_type; - using value_type = ValueType; - using hasher = Hash; - using key_equal = KeyEqual; - using allocator_type = Allocator; - using reference = value_type &; - using const_reference = const value_type &; - using size_type = typename std::allocator_traits::size_type; - using pointer = typename std::allocator_traits::pointer; - using const_pointer = typename std::allocator_traits::const_pointer; - using difference_type = typename std::allocator_traits::difference_type; - using iterator = sparse_iterator; - using const_iterator = sparse_iterator; - - private: - using sparse_array = - dice::sparse_map::detail_sparse_hash::sparse_array; - - using sparse_buckets_allocator = typename std::allocator_traits< - allocator_type>::template rebind_alloc; - using sparse_buckets_container = - boost::container::vector; - public: - /** - * The `operator*()` and `operator->()` methods return a const reference and - * const pointer respectively to the stored value type (`Key` for a set, - * `std::pair` for a map). - * - * In case of a map, to get a mutable reference to the value `T` associated to - * a key (the `.second` in the stored pair), you have to call `value()`. - */ - template - class sparse_iterator { - friend class sparse_hash; - - private: - using sparse_bucket_iterator = typename std::conditional< - IsConst, typename sparse_buckets_container::const_iterator, - typename sparse_buckets_container::iterator>::type; - - using sparse_array_iterator = - typename std::conditional::type; - - /** - * sparse_array_it should be nullptr if sparse_bucket_it == - * m_sparse_buckets_data.end(). (TODO better way?) - */ - sparse_iterator(sparse_bucket_iterator sparse_bucket_it, - sparse_array_iterator sparse_array_it) - : m_sparse_buckets_it(sparse_bucket_it), - m_sparse_array_it(sparse_array_it) {} - - public: - using iterator_category = std::forward_iterator_tag; - using value_type = const typename sparse_hash::value_type; - using difference_type = std::ptrdiff_t; - using reference = value_type &; - using pointer = typename sparse_hash::const_pointer; - - sparse_iterator() noexcept {} - - // Copy constructor from iterator to const_iterator. 
- template ::type * = nullptr> - sparse_iterator(const sparse_iterator &other) noexcept - : m_sparse_buckets_it(other.m_sparse_buckets_it), - m_sparse_array_it(other.m_sparse_array_it) {} - - sparse_iterator(const sparse_iterator &other) = default; - sparse_iterator(sparse_iterator &&other) = default; - sparse_iterator &operator=(const sparse_iterator &other) = default; - sparse_iterator &operator=(sparse_iterator &&other) = default; - - const typename sparse_hash::key_type &key() const { - return KeySelect()(*m_sparse_array_it); - } - - template ::value && - IsConst>::type * = nullptr> - const typename U::value_type &value() const { - return U()(*m_sparse_array_it); - } - - template ::value && - !IsConst>::type * = nullptr> - typename U::value_type &value() { - return U()(*m_sparse_array_it); - } - - reference operator*() const { return *m_sparse_array_it; } - - //with fancy pointers addressof might be problematic. - pointer operator->() const { return std::addressof(*m_sparse_array_it); } - - sparse_iterator &operator++() { - tsl_sh_assert(m_sparse_array_it != nullptr); - ++m_sparse_array_it; - - //vector iterator with fancy pointers have a problem with -> - if (m_sparse_array_it == (*m_sparse_buckets_it).end()) { - do { - if ((*m_sparse_buckets_it).last()) { - ++m_sparse_buckets_it; - m_sparse_array_it = nullptr; - return *this; - } - - ++m_sparse_buckets_it; - } while ((*m_sparse_buckets_it).empty()); - - m_sparse_array_it = (*m_sparse_buckets_it).begin(); - } - - return *this; - } - - sparse_iterator operator++(int) { - sparse_iterator tmp(*this); - ++*this; - - return tmp; - } - - friend bool operator==(const sparse_iterator &lhs, - const sparse_iterator &rhs) { - return lhs.m_sparse_buckets_it == rhs.m_sparse_buckets_it && - lhs.m_sparse_array_it == rhs.m_sparse_array_it; - } - - friend bool operator!=(const sparse_iterator &lhs, - const sparse_iterator &rhs) { - return !(lhs == rhs); - } - - private: - sparse_bucket_iterator m_sparse_buckets_it; - sparse_array_iterator m_sparse_array_it; - }; - - public: - sparse_hash(size_type bucket_count, const Hash &hash, const KeyEqual &equal, - const Allocator &alloc, float max_load_factor) - : Allocator(alloc), - Hash(hash), - KeyEqual(equal), - GrowthPolicy(bucket_count), - m_sparse_buckets_data(alloc), - // m_sparse_buckets_data(std::allocator_traits::rebind_alloc(alloc)), - m_sparse_buckets(static_empty_sparse_bucket_ptr()), - m_bucket_count(bucket_count), - m_nb_elements(0), - m_nb_deleted_buckets(0) { - if (m_bucket_count > max_bucket_count()) { - throw std::length_error("The map exceeds its maximum size."); - } - - if (m_bucket_count > 0) { - /* - * We can't use the `vector(size_type count, const Allocator& alloc)` - * constructor as it's only available in C++14 and we need to support - * C++11. We thus must resize after using the `vector(const Allocator& - * alloc)` constructor. - * - * We can't use `vector(size_type count, const T& value, const Allocator& - * alloc)` as it requires the value T to be copyable. - */ - m_sparse_buckets_data.resize( - sparse_array::nb_sparse_buckets(bucket_count)); - m_sparse_buckets = m_sparse_buckets_data.data(); - - tsl_sh_assert(!m_sparse_buckets_data.empty()); - m_sparse_buckets_data.back().set_as_last(); - } - - this->max_load_factor(max_load_factor); - - // Check in the constructor instead of outside of a function to avoid - // compilation issues when value_type is not complete. 
- static_assert(std::is_nothrow_move_constructible::value || - std::is_copy_constructible::value, - "Key, and T if present, must be nothrow move constructible " - "and/or copy constructible."); - } - - ~sparse_hash() { clear(); } - - sparse_hash(const sparse_hash &other) - : Allocator(std::allocator_traits< - Allocator>::select_on_container_copy_construction(other)), - Hash(other), - KeyEqual(other), - GrowthPolicy(other), - m_sparse_buckets_data( - std::allocator_traits< - Allocator>::select_on_container_copy_construction(other)), - m_bucket_count(other.m_bucket_count), - m_nb_elements(other.m_nb_elements), - m_nb_deleted_buckets(other.m_nb_deleted_buckets), - m_load_threshold_rehash(other.m_load_threshold_rehash), - m_load_threshold_clear_deleted(other.m_load_threshold_clear_deleted), - m_max_load_factor(other.m_max_load_factor) { - copy_buckets_from(other), - m_sparse_buckets = m_sparse_buckets_data.empty() - ? static_empty_sparse_bucket_ptr() - : m_sparse_buckets_data.data(); - } - - sparse_hash(sparse_hash &&other) noexcept( - std::is_nothrow_move_constructible::value - &&std::is_nothrow_move_constructible::value - &&std::is_nothrow_move_constructible::value - &&std::is_nothrow_move_constructible::value - &&std::is_nothrow_move_constructible< - sparse_buckets_container>::value) - : Allocator(std::move(other)), - Hash(std::move(other)), - KeyEqual(std::move(other)), - GrowthPolicy(std::move(other)), - m_sparse_buckets_data(std::move(other.m_sparse_buckets_data)), - m_sparse_buckets(m_sparse_buckets_data.empty() - ? static_empty_sparse_bucket_ptr() - : m_sparse_buckets_data.data()), - m_bucket_count(other.m_bucket_count), - m_nb_elements(other.m_nb_elements), - m_nb_deleted_buckets(other.m_nb_deleted_buckets), - m_load_threshold_rehash(other.m_load_threshold_rehash), - m_load_threshold_clear_deleted(other.m_load_threshold_clear_deleted), - m_max_load_factor(other.m_max_load_factor) { - other.GrowthPolicy::clear(); - other.m_sparse_buckets_data.clear(); - other.m_sparse_buckets = static_empty_sparse_bucket_ptr(); - other.m_bucket_count = 0; - other.m_nb_elements = 0; - other.m_nb_deleted_buckets = 0; - other.m_load_threshold_rehash = 0; - other.m_load_threshold_clear_deleted = 0; - } - - sparse_hash &operator=(const sparse_hash &other) { - if (this != &other) { - clear(); - - if (std::allocator_traits< - Allocator>::propagate_on_container_copy_assignment::value) { - Allocator::operator=(other); - } - - Hash::operator=(other); - KeyEqual::operator=(other); - GrowthPolicy::operator=(other); - - if (std::allocator_traits< - Allocator>::propagate_on_container_copy_assignment::value) { - m_sparse_buckets_data = - sparse_buckets_container(static_cast(other)); - } else { - if (m_sparse_buckets_data.size() != - other.m_sparse_buckets_data.size()) { - m_sparse_buckets_data = - sparse_buckets_container(static_cast(*this)); - } else { - m_sparse_buckets_data.clear(); - } - } - - copy_buckets_from(other); - m_sparse_buckets = m_sparse_buckets_data.empty() - ? 
static_empty_sparse_bucket_ptr() - : m_sparse_buckets_data.data(); - - m_bucket_count = other.m_bucket_count; - m_nb_elements = other.m_nb_elements; - m_nb_deleted_buckets = other.m_nb_deleted_buckets; - m_load_threshold_rehash = other.m_load_threshold_rehash; - m_load_threshold_clear_deleted = other.m_load_threshold_clear_deleted; - m_max_load_factor = other.m_max_load_factor; - } - - return *this; - } - - sparse_hash &operator=(sparse_hash &&other) noexcept { - clear(); - - if (not std::allocator_traits< - Allocator>::propagate_on_container_move_assignment::value - and (static_cast(*this) != static_cast(other))) { - move_buckets_from(std::move(other)); - } else { - static_cast(*this) = std::move(static_cast(other)); - m_sparse_buckets_data = std::move(other.m_sparse_buckets_data); - } - - m_sparse_buckets = m_sparse_buckets_data.empty() - ? static_empty_sparse_bucket_ptr() - : m_sparse_buckets_data.data(); - - static_cast(*this) = std::move(static_cast(other)); - static_cast(*this) = std::move(static_cast(other)); - static_cast(*this) = - std::move(static_cast(other)); - m_bucket_count = other.m_bucket_count; - m_nb_elements = other.m_nb_elements; - m_nb_deleted_buckets = other.m_nb_deleted_buckets; - m_load_threshold_rehash = other.m_load_threshold_rehash; - m_load_threshold_clear_deleted = other.m_load_threshold_clear_deleted; - m_max_load_factor = other.m_max_load_factor; - - other.GrowthPolicy::clear(); - other.m_sparse_buckets_data.clear(); - other.m_sparse_buckets = static_empty_sparse_bucket_ptr(); - other.m_bucket_count = 0; - other.m_nb_elements = 0; - other.m_nb_deleted_buckets = 0; - other.m_load_threshold_rehash = 0; - other.m_load_threshold_clear_deleted = 0; - - return *this; - } - - allocator_type get_allocator() const { - return static_cast(*this); - } - - /* - * Iterators - */ - iterator begin() noexcept { - auto begin = m_sparse_buckets_data.begin(); - //vector iterator with fancy pointers have a problem with -> - while (begin != m_sparse_buckets_data.end() && (*begin).empty()) { - ++begin; - } - - //vector iterator with fancy pointers have a problem with -> - return iterator(begin, (begin != m_sparse_buckets_data.end()) - ? (*begin).begin() - : nullptr); - } - - const_iterator begin() const noexcept { return cbegin(); } - - const_iterator cbegin() const noexcept { - auto begin = m_sparse_buckets_data.cbegin(); - //vector iterator with fancy pointers have a problem with -> - while (begin != m_sparse_buckets_data.cend() && (*begin).empty()) { - ++begin; - } - - return const_iterator(begin, (begin != m_sparse_buckets_data.cend()) - ? (*begin).cbegin() - : nullptr); - } - - iterator end() noexcept { - return iterator(m_sparse_buckets_data.end(), nullptr); - } - - const_iterator end() const noexcept { return cend(); } - - const_iterator cend() const noexcept { - return const_iterator(m_sparse_buckets_data.cend(), nullptr); - } - - /* - * Capacity - */ - bool empty() const noexcept { return m_nb_elements == 0; } - - size_type size() const noexcept { return m_nb_elements; } - - size_type max_size() const noexcept { - return std::min(std::allocator_traits::max_size(), - m_sparse_buckets_data.max_size()); - } - - /* - * Modifiers - */ - void clear() noexcept { - for (auto &bucket : m_sparse_buckets_data) { - bucket.clear(*this); - } - - m_nb_elements = 0; - m_nb_deleted_buckets = 0; - } - - template - std::pair insert(P &&value) { - return insert_impl(KeySelect()(value), std::forward
<P>
(value)); - } - - template - iterator insert_hint(const_iterator hint, P &&value) { - if (hint != cend() && - compare_keys(KeySelect()(*hint), KeySelect()(value))) { - return mutable_iterator(hint); - } - - return insert(std::forward
<P>
(value)).first; - } - - template - void insert(InputIt first, InputIt last) { - if (std::is_base_of< - std::forward_iterator_tag, - typename std::iterator_traits::iterator_category>::value) { - const auto nb_elements_insert = std::distance(first, last); - const size_type nb_free_buckets = m_load_threshold_rehash - size(); - tsl_sh_assert(m_load_threshold_rehash >= size()); - - if (nb_elements_insert > 0 && - nb_free_buckets < size_type(nb_elements_insert)) { - reserve(size() + size_type(nb_elements_insert)); - } - } - - for (; first != last; ++first) { - insert(*first); - } - } - - template - std::pair insert_or_assign(K &&key, M &&obj) { - auto it = try_emplace(std::forward(key), std::forward(obj)); - if (!it.second) { - it.first.value() = std::forward(obj); - } - - return it; - } - - template - iterator insert_or_assign(const_iterator hint, K &&key, M &&obj) { - if (hint != cend() && compare_keys(KeySelect()(*hint), key)) { - auto it = mutable_iterator(hint); - it.value() = std::forward(obj); - - return it; - } - - return insert_or_assign(std::forward(key), std::forward(obj)).first; - } - - template - std::pair emplace(Args &&...args) { - return insert(value_type(std::forward(args)...)); - } - - template - iterator emplace_hint(const_iterator hint, Args &&...args) { - return insert_hint(hint, value_type(std::forward(args)...)); - } - - template - std::pair try_emplace(K &&key, Args &&...args) { - return insert_impl(key, std::piecewise_construct, - std::forward_as_tuple(std::forward(key)), - std::forward_as_tuple(std::forward(args)...)); - } - - template - iterator try_emplace_hint(const_iterator hint, K &&key, Args &&...args) { - if (hint != cend() && compare_keys(KeySelect()(*hint), key)) { - return mutable_iterator(hint); - } - - return try_emplace(std::forward(key), std::forward(args)...).first; - } - - /** - * Here to avoid `template size_type erase(const K& key)` being used - * when we use an iterator instead of a const_iterator. - */ - iterator erase(iterator pos) { - tsl_sh_assert(pos != end() && m_nb_elements > 0); - //vector iterator with fancy pointers have a problem with -> - auto it_sparse_array_next = - (*pos.m_sparse_buckets_it).erase(*this, pos.m_sparse_array_it); - m_nb_elements--; - m_nb_deleted_buckets++; - - if (it_sparse_array_next == (*pos.m_sparse_buckets_it).end()) { - auto it_sparse_buckets_next = pos.m_sparse_buckets_it; - do { - ++it_sparse_buckets_next; - } while (it_sparse_buckets_next != m_sparse_buckets_data.end() && - (*it_sparse_buckets_next).empty()); - - if (it_sparse_buckets_next == m_sparse_buckets_data.end()) { - return end(); - } else { - return iterator(it_sparse_buckets_next, - (*it_sparse_buckets_next).begin()); - } - } else { - return iterator(pos.m_sparse_buckets_it, it_sparse_array_next); - } - } - - iterator erase(const_iterator pos) { return erase(mutable_iterator(pos)); } - - iterator erase(const_iterator first, const_iterator last) { - if (first == last) { - return mutable_iterator(first); - } - - // TODO Optimize, could avoid the call to std::distance. 
- const size_type nb_elements_to_erase = - static_cast(std::distance(first, last)); - auto to_delete = mutable_iterator(first); - for (size_type i = 0; i < nb_elements_to_erase; i++) { - to_delete = erase(to_delete); - } - - return to_delete; - } - - template - size_type erase(const K &key) { - return erase(key, hash_key(key)); - } - - template - size_type erase(const K &key, std::size_t hash) { - return erase_impl(key, hash); - } - - void swap(sparse_hash &other) { - using std::swap; - - if (std::allocator_traits::propagate_on_container_swap::value) { - swap(static_cast(*this), static_cast(other)); - } else { - tsl_sh_assert(static_cast(*this) == - static_cast(other)); - } - - swap(static_cast(*this), static_cast(other)); - swap(static_cast(*this), static_cast(other)); - swap(static_cast(*this), - static_cast(other)); - swap(m_sparse_buckets_data, other.m_sparse_buckets_data); - swap(m_sparse_buckets, other.m_sparse_buckets); - swap(m_bucket_count, other.m_bucket_count); - swap(m_nb_elements, other.m_nb_elements); - swap(m_nb_deleted_buckets, other.m_nb_deleted_buckets); - swap(m_load_threshold_rehash, other.m_load_threshold_rehash); - swap(m_load_threshold_clear_deleted, other.m_load_threshold_clear_deleted); - swap(m_max_load_factor, other.m_max_load_factor); - } - - /* - * Lookup - */ - template < - class K, class U = ValueSelect, - typename std::enable_if::value>::type * = nullptr> - typename U::value_type &at(const K &key) { - return at(key, hash_key(key)); - } - - template < - class K, class U = ValueSelect, - typename std::enable_if::value>::type * = nullptr> - typename U::value_type &at(const K &key, std::size_t hash) { - return const_cast( - static_cast(this)->at(key, hash)); - } - - template < - class K, class U = ValueSelect, - typename std::enable_if::value>::type * = nullptr> - const typename U::value_type &at(const K &key) const { - return at(key, hash_key(key)); - } - - template < - class K, class U = ValueSelect, - typename std::enable_if::value>::type * = nullptr> - const typename U::value_type &at(const K &key, std::size_t hash) const { - auto it = find(key, hash); - if (it != cend()) { - return it.value(); - } else { - throw std::out_of_range("Couldn't find key."); - } - } - - template < - class K, class U = ValueSelect, - typename std::enable_if::value>::type * = nullptr> - typename U::value_type &operator[](K &&key) { - return try_emplace(std::forward(key)).first.value(); - } - - template - bool contains(const K &key) const { - return contains(key, hash_key(key)); - } - - template - bool contains(const K &key, std::size_t hash) const { - return count(key, hash) != 0; - } - - template - size_type count(const K &key) const { - return count(key, hash_key(key)); - } - - template - size_type count(const K &key, std::size_t hash) const { - if (find(key, hash) != cend()) { - return 1; - } else { - return 0; - } - } - - template - iterator find(const K &key) { - return find_impl(key, hash_key(key)); - } - - template - iterator find(const K &key, std::size_t hash) { - return find_impl(key, hash); - } - - template - const_iterator find(const K &key) const { - return find_impl(key, hash_key(key)); - } - - template - const_iterator find(const K &key, std::size_t hash) const { - return find_impl(key, hash); - } - - template - std::pair equal_range(const K &key) { - return equal_range(key, hash_key(key)); - } - - template - std::pair equal_range(const K &key, std::size_t hash) { - iterator it = find(key, hash); - return std::make_pair(it, (it == end()) ? 
it : std::next(it)); - } - - template - std::pair equal_range(const K &key) const { - return equal_range(key, hash_key(key)); - } - - template - std::pair equal_range( - const K &key, std::size_t hash) const { - const_iterator it = find(key, hash); - return std::make_pair(it, (it == cend()) ? it : std::next(it)); - } - - /* - * Bucket interface - */ - size_type bucket_count() const { return m_bucket_count; } - - size_type max_bucket_count() const { - return m_sparse_buckets_data.max_size(); - } - - /* - * Hash policy - */ - float load_factor() const { - if (bucket_count() == 0) { - return 0; - } - - return float(m_nb_elements) / float(bucket_count()); - } - - float max_load_factor() const { return m_max_load_factor; } - - void max_load_factor(float ml) { - m_max_load_factor = std::max(0.1f, std::min(ml, 0.8f)); - m_load_threshold_rehash = - size_type(float(bucket_count()) * m_max_load_factor); - - const float max_load_factor_with_deleted_buckets = - m_max_load_factor + 0.5f * (1.0f - m_max_load_factor); - tsl_sh_assert(max_load_factor_with_deleted_buckets > 0.0f && - max_load_factor_with_deleted_buckets <= 1.0f); - m_load_threshold_clear_deleted = - size_type(float(bucket_count()) * max_load_factor_with_deleted_buckets); - } - - void rehash(size_type count) { - count = std::max(count, - size_type(std::ceil(float(size()) / max_load_factor()))); - rehash_impl(count); - } - - void reserve(size_type count) { - rehash(size_type(std::ceil(float(count) / max_load_factor()))); - } - - /* - * Observers - */ - hasher hash_function() const { return static_cast(*this); } - - key_equal key_eq() const { return static_cast(*this); } - - /* - * Other - */ - iterator mutable_iterator(const_iterator pos) { - auto it_sparse_buckets = - m_sparse_buckets_data.begin() + - std::distance(m_sparse_buckets_data.cbegin(), pos.m_sparse_buckets_it); - - return iterator(it_sparse_buckets, - sparse_array::mutable_iterator(pos.m_sparse_array_it)); - } - - template - void serialize(Serializer &serializer) const { - serialize_impl(serializer); - } - - template - void deserialize(Deserializer &deserializer, bool hash_compatible) { - deserialize_impl(deserializer, hash_compatible); - } - - private: - template - std::size_t hash_key(const K &key) const { - return Hash::operator()(key); - } - - template - bool compare_keys(const K1 &key1, const K2 &key2) const { - return KeyEqual::operator()(key1, key2); - } - - size_type bucket_for_hash(std::size_t hash) const { - const std::size_t bucket = GrowthPolicy::bucket_for_hash(hash); - tsl_sh_assert(sparse_array::sparse_ibucket(bucket) < - m_sparse_buckets_data.size() || - (bucket == 0 && m_sparse_buckets_data.empty())); - - return bucket; - } - - template ::value>::type * = - nullptr> - size_type next_bucket(size_type ibucket, size_type iprobe) const { - (void)iprobe; - if (Probing == dice::sparse_map::sh::probing::linear) { - return (ibucket + 1) & this->m_mask; - } else { - tsl_sh_assert(Probing == dice::sparse_map::sh::probing::quadratic); - return (ibucket + iprobe) & this->m_mask; - } - } - - template ::value>::type * = - nullptr> - size_type next_bucket(size_type ibucket, size_type iprobe) const { - (void)iprobe; - if (Probing == dice::sparse_map::sh::probing::linear) { - ibucket++; - return (ibucket != bucket_count()) ? ibucket : 0; - } else { - tsl_sh_assert(Probing == dice::sparse_map::sh::probing::quadratic); - ibucket += iprobe; - return (ibucket < bucket_count()) ? 
ibucket : ibucket % bucket_count(); - } - } - - // TODO encapsulate m_sparse_buckets_data to avoid the managing the allocator - void copy_buckets_from(const sparse_hash &other) { - m_sparse_buckets_data.reserve(other.m_sparse_buckets_data.size()); - - try { - for (const auto &bucket : other.m_sparse_buckets_data) { - m_sparse_buckets_data.emplace_back(bucket, - static_cast(*this)); - } - } catch (...) { - clear(); - throw; - } - - tsl_sh_assert(m_sparse_buckets_data.empty() || - m_sparse_buckets_data.back().last()); - } - - void move_buckets_from(sparse_hash &&other) { - m_sparse_buckets_data.reserve(other.m_sparse_buckets_data.size()); - - try { - for (auto &&bucket : other.m_sparse_buckets_data) { - m_sparse_buckets_data.emplace_back(std::move(bucket), - static_cast(*this)); - } - } catch (...) { - clear(); - throw; - } - - tsl_sh_assert(m_sparse_buckets_data.empty() || - m_sparse_buckets_data.back().last()); - } - - template - std::pair insert_impl(const K &key, - Args &&...value_type_args) { - if (size() >= m_load_threshold_rehash) { - rehash_impl(GrowthPolicy::next_bucket_count()); - } else if (size() + m_nb_deleted_buckets >= - m_load_threshold_clear_deleted) { - clear_deleted_buckets(); - } - tsl_sh_assert(!m_sparse_buckets_data.empty()); - - /** - * We must insert the value in the first empty or deleted bucket we find. If - * we first find a deleted bucket, we still have to continue the search - * until we find an empty bucket or until we have searched all the buckets - * to be sure that the value is not in the hash table. We thus remember the - * position, if any, of the first deleted bucket we have encountered so we - * can insert it there if needed. - */ - bool found_first_deleted_bucket = false; - std::size_t sparse_ibucket_first_deleted = 0; - typename sparse_array::size_type index_in_sparse_bucket_first_deleted = 0; - - const std::size_t hash = hash_key(key); - std::size_t ibucket = bucket_for_hash(hash); - - std::size_t probe = 0; - while (true) { - std::size_t sparse_ibucket = sparse_array::sparse_ibucket(ibucket); - auto index_in_sparse_bucket = - sparse_array::index_in_sparse_bucket(ibucket); - - if (m_sparse_buckets != static_empty_sparse_bucket_ptr()) { - if (m_sparse_buckets[sparse_ibucket].has_value(index_in_sparse_bucket)) { - auto value_it = - m_sparse_buckets[sparse_ibucket].value(index_in_sparse_bucket); - if (compare_keys(key, KeySelect()(*value_it))) { - return std::make_pair( - iterator(m_sparse_buckets_data.begin() + sparse_ibucket, - value_it), - false); - } - } else if (m_sparse_buckets[sparse_ibucket].has_deleted_value( - index_in_sparse_bucket) && - probe < m_bucket_count) { - if (!found_first_deleted_bucket) { - found_first_deleted_bucket = true; - sparse_ibucket_first_deleted = sparse_ibucket; - index_in_sparse_bucket_first_deleted = index_in_sparse_bucket; - } - } else if (found_first_deleted_bucket) { - auto it = insert_in_bucket(sparse_ibucket_first_deleted, - index_in_sparse_bucket_first_deleted, - std::forward(value_type_args)...); - m_nb_deleted_buckets--; - - return it; - } - else { - return insert_in_bucket(sparse_ibucket, index_in_sparse_bucket, - std::forward(value_type_args)...); - } - }else { - return insert_in_bucket(sparse_ibucket, index_in_sparse_bucket, - std::forward(value_type_args)...); - } - - probe++; - ibucket = next_bucket(ibucket, probe); - } - } - - template - std::pair insert_in_bucket( - std::size_t sparse_ibucket, - typename sparse_array::size_type index_in_sparse_bucket, - Args &&...value_type_args) { - // is not called when 
empty - auto value_it = m_sparse_buckets[sparse_ibucket].set( - *this, index_in_sparse_bucket, std::forward(value_type_args)...); - m_nb_elements++; - - return std::make_pair( - iterator(m_sparse_buckets_data.begin() + sparse_ibucket, value_it), - true); - } - - template - size_type erase_impl(const K &key, std::size_t hash) { - std::size_t ibucket = bucket_for_hash(hash); - - std::size_t probe = 0; - - if (m_sparse_buckets == static_empty_sparse_bucket_ptr()) - return 0; - while (true) { - const std::size_t sparse_ibucket = sparse_array::sparse_ibucket(ibucket); - const auto index_in_sparse_bucket = - sparse_array::index_in_sparse_bucket(ibucket); - - if (m_sparse_buckets[sparse_ibucket].has_value(index_in_sparse_bucket)) { - auto value_it = - m_sparse_buckets[sparse_ibucket].value(index_in_sparse_bucket); - if (compare_keys(key, KeySelect()(*value_it))) { - m_sparse_buckets[sparse_ibucket].erase(*this, value_it, - index_in_sparse_bucket); - m_nb_elements--; - m_nb_deleted_buckets++; - - return 1; - } - } else if (!m_sparse_buckets[sparse_ibucket].has_deleted_value( - index_in_sparse_bucket) || - probe >= m_bucket_count) { - return 0; - } - - probe++; - ibucket = next_bucket(ibucket, probe); - } - } - - template - iterator find_impl(const K &key, std::size_t hash) { - return mutable_iterator( - static_cast(this)->find(key, hash)); - } - - template - const_iterator find_impl(const K &key, std::size_t hash) const { - std::size_t ibucket = bucket_for_hash(hash); - - std::size_t probe = 0; - while (true) { - const std::size_t sparse_ibucket = sparse_array::sparse_ibucket(ibucket); - const auto index_in_sparse_bucket = - sparse_array::index_in_sparse_bucket(ibucket); - - if (m_sparse_buckets == static_empty_sparse_bucket_ptr()) { - return cend(); - }if (m_sparse_buckets[sparse_ibucket].has_value(index_in_sparse_bucket)) { - auto value_it = - m_sparse_buckets[sparse_ibucket].value(index_in_sparse_bucket); - if (compare_keys(key, KeySelect()(*value_it))) { - return const_iterator(m_sparse_buckets_data.cbegin() + sparse_ibucket, - value_it); - } - } else if (!m_sparse_buckets[sparse_ibucket].has_deleted_value( - index_in_sparse_bucket) || - probe >= m_bucket_count) { - return cend(); - } - - probe++; - ibucket = next_bucket(ibucket, probe); - } - } - - void clear_deleted_buckets() { - // TODO could be optimized, we could do it in-place instead of allocating a - // new bucket array. - rehash_impl(m_bucket_count); - tsl_sh_assert(m_nb_deleted_buckets == 0); - } - - template ::type - * = nullptr> - void rehash_impl(size_type count) { - sparse_hash new_table(count, static_cast(*this), - static_cast(*this), - static_cast(*this), m_max_load_factor); - - for (auto &bucket : m_sparse_buckets_data) { - for (auto &val : bucket) { - new_table.insert_on_rehash(std::move(val)); - } - - // TODO try to reuse some of the memory - bucket.clear(*this); - } - - new_table.swap(*this); - } - - /** - * TODO: For now we copy each element into the new map. We could move - * them if they are nothrow_move_constructible without triggering - * any exception if we reserve enough space in the sparse arrays beforehand. 
- */ - template ::type * = nullptr> - void rehash_impl(size_type count) { - sparse_hash new_table(count, static_cast(*this), - static_cast(*this), - static_cast(*this), m_max_load_factor); - - for (const auto &bucket : m_sparse_buckets_data) { - for (const auto &val : bucket) { - new_table.insert_on_rehash(val); - } - } - - new_table.swap(*this); - } - - template - void insert_on_rehash(K &&key_value) { - const key_type &key = KeySelect()(key_value); - - const std::size_t hash = hash_key(key); - std::size_t ibucket = bucket_for_hash(hash); - - std::size_t probe = 0; - while (true) { - std::size_t sparse_ibucket = sparse_array::sparse_ibucket(ibucket); - auto index_in_sparse_bucket = - sparse_array::index_in_sparse_bucket(ibucket); - - if (!m_sparse_buckets[sparse_ibucket].has_value(index_in_sparse_bucket)) { - m_sparse_buckets[sparse_ibucket].set(*this, index_in_sparse_bucket, - std::forward(key_value)); - m_nb_elements++; - - return; - } else { - tsl_sh_assert(!compare_keys( - key, KeySelect()(*m_sparse_buckets[sparse_ibucket].value( - index_in_sparse_bucket)))); - } - - probe++; - ibucket = next_bucket(ibucket, probe); - } - } - - template - void serialize_impl(Serializer &serializer) const { - const slz_size_type version = SERIALIZATION_PROTOCOL_VERSION; - serializer(version); - - const slz_size_type bucket_count = m_bucket_count; - serializer(bucket_count); - - const slz_size_type nb_sparse_buckets = m_sparse_buckets_data.size(); - serializer(nb_sparse_buckets); - - const slz_size_type nb_elements = m_nb_elements; - serializer(nb_elements); - - const slz_size_type nb_deleted_buckets = m_nb_deleted_buckets; - serializer(nb_deleted_buckets); - - const float max_load_factor = m_max_load_factor; - serializer(max_load_factor); - - for (const auto &bucket : m_sparse_buckets_data) { - bucket.serialize(serializer); - } - } - - template - void deserialize_impl(Deserializer &deserializer, bool hash_compatible) { - tsl_sh_assert( - m_bucket_count == 0 && - m_sparse_buckets_data.empty()); // Current hash table must be empty - - const slz_size_type version = - deserialize_value(deserializer); - // For now we only have one version of the serialization protocol. - // If it doesn't match there is a problem with the file. - if (version != SERIALIZATION_PROTOCOL_VERSION) { - throw std::runtime_error( - "Can't deserialize the sparse_map/set. 
The " - "protocol version header is invalid."); - } - - const slz_size_type bucket_count_ds = - deserialize_value(deserializer); - const slz_size_type nb_sparse_buckets = - deserialize_value(deserializer); - const slz_size_type nb_elements = - deserialize_value(deserializer); - const slz_size_type nb_deleted_buckets = - deserialize_value(deserializer); - const float max_load_factor = deserialize_value(deserializer); - - if (!hash_compatible) { - this->max_load_factor(max_load_factor); - reserve(numeric_cast(nb_elements, - "Deserialized nb_elements is too big.")); - for (slz_size_type ibucket = 0; ibucket < nb_sparse_buckets; ibucket++) { - sparse_array::deserialize_values_into_sparse_hash(deserializer, *this); - } - } else { - m_bucket_count = numeric_cast( - bucket_count_ds, "Deserialized bucket_count is too big."); - - GrowthPolicy::operator=(GrowthPolicy(m_bucket_count)); - // GrowthPolicy should not modify the bucket count we got from - // deserialization - if (m_bucket_count != bucket_count_ds) { - throw std::runtime_error( - "The GrowthPolicy is not the same even though " - "hash_compatible is true."); - } - - if (nb_sparse_buckets != - sparse_array::nb_sparse_buckets(m_bucket_count)) { - throw std::runtime_error("Deserialized nb_sparse_buckets is invalid."); - } - - m_nb_elements = numeric_cast( - nb_elements, "Deserialized nb_elements is too big."); - m_nb_deleted_buckets = numeric_cast( - nb_deleted_buckets, "Deserialized nb_deleted_buckets is too big."); - - m_sparse_buckets_data.reserve(numeric_cast( - nb_sparse_buckets, "Deserialized nb_sparse_buckets is too big.")); - for (slz_size_type ibucket = 0; ibucket < nb_sparse_buckets; ibucket++) { - m_sparse_buckets_data.emplace_back( - sparse_array::deserialize_hash_compatible( - deserializer, static_cast(*this))); - } - - if (!m_sparse_buckets_data.empty()) { - m_sparse_buckets_data.back().set_as_last(); - m_sparse_buckets = m_sparse_buckets_data.data(); - } - - this->max_load_factor(max_load_factor); - if (load_factor() > this->max_load_factor()) { - throw std::runtime_error( - "Invalid max_load_factor. Check that the serializer and " - "deserializer support " - "floats correctly as they can be converted implicitely to ints."); - } - } - } - - public: - static const size_type DEFAULT_INIT_BUCKET_COUNT = 0; - static constexpr float DEFAULT_MAX_LOAD_FACTOR = 0.5f; - - /** - * Protocol version currenlty used for serialization. - */ - static const slz_size_type SERIALIZATION_PROTOCOL_VERSION = 1; - - using sparse_array_ptr = typename std::allocator_traits::template rebind_traits::pointer; - /** - * Return an nullptr to indicate an empty bucket - */ - static sparse_array_ptr static_empty_sparse_bucket_ptr() { - return {}; - } - - private: - sparse_buckets_container m_sparse_buckets_data; - - - /** - * Points to m_sparse_buckets_data.data() if !m_sparse_buckets_data.empty() - * otherwise points to static_empty_sparse_bucket_ptr. This variable is useful - * to avoid the cost of checking if m_sparse_buckets_data is empty when trying - * to find an element. - * - * TODO Remove m_sparse_buckets_data and only use a pointer instead of a - * pointer+vector to save some space in the sparse_hash object. - */ - - sparse_array_ptr m_sparse_buckets; - - size_type m_bucket_count; - size_type m_nb_elements; - size_type m_nb_deleted_buckets; - - /** - * Maximum that m_nb_elements can reach before a rehash occurs automatically - * to grow the hash table. 
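 * As a purely illustrative example (numbers assumed): with
 * `bucket_count() == 1024` and the default `max_load_factor()` of 0.5f,
 * `m_load_threshold_rehash` is `size_type(1024 * 0.5f) == 512`; once
 * `size()` reaches 512, the next insertion first grows the table.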
- */ - size_type m_load_threshold_rehash; - - /** - * Maximum that m_nb_elements + m_nb_deleted_buckets can reach before cleaning - * up the buckets marked as deleted. - */ - size_type m_load_threshold_clear_deleted; - float m_max_load_factor; -}; - -} // namespace detail_sparse_hash -} // namespace dice - -#endif diff --git a/include/dice/sparse-map/sparse_map.hpp b/include/dice/sparse-map/sparse_map.hpp deleted file mode 100644 index 8dc234c..0000000 --- a/include/dice/sparse-map/sparse_map.hpp +++ /dev/null @@ -1,801 +0,0 @@ -/** - * MIT License - * - * Copyright (c) 2017 Thibaut Goetghebuer-Planchon - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef DICE_SPARSE_MAP_SPARSE_MAP_HPP -#define DICE_SPARSE_MAP_SPARSE_MAP_HPP - -#include -#include -#include -#include -#include -#include - -#include "dice/sparse-map/sparse_hash.hpp" -#include "dice/sparse-map/boost_offset_pointer.hpp" - -namespace dice::sparse_map { - -/** - * Implementation of a sparse hash map using open-addressing with quadratic - * probing. The goal on the hash map is to be the most memory efficient - * possible, even at low load factor, while keeping reasonable performances. - * - * `GrowthPolicy` defines how the map grows and consequently how a hash value is - * mapped to a bucket. By default the map uses - * `dice::sh::power_of_two_growth_policy`. This policy keeps the number of - * buckets to a power of two and uses a mask to map the hash to a bucket instead - * of the slow modulo. Other growth policies are available and you may define - * your own growth policy, check `dice::sh::power_of_two_growth_policy` for the - * interface. - * - * `ExceptionSafety` defines the exception guarantee provided by the class. By - * default only the basic exception safety is guaranteed which mean that all - * resources used by the hash map will be freed (no memory leaks) but the hash - * map may end-up in an undefined state if an exception is thrown (undefined - * here means that some elements may be missing). This can ONLY happen on rehash - * (either on insert or if `rehash` is called explicitly) and will occur if the - * Allocator can't allocate memory (`std::bad_alloc`) or if the copy constructor - * (when a nothrow move constructor is not available) throws an exception. This - * can be avoided by calling `reserve` beforehand. This basic guarantee is - * similar to the one of `google::sparse_hash_map` and `spp::sparse_hash_map`. 
- * It is possible to ask for the strong exception guarantee with - * `dice::sh::exception_safety::strong`, the drawback is that the map will be - * slower on rehashes and will also need more memory on rehashes. - * - * `Sparsity` defines how much the hash set will compromise between insertion - * speed and memory usage. A high sparsity means less memory usage but longer - * insertion times, and vice-versa for low sparsity. The default - * `dice::sh::sparsity::medium` sparsity offers a good compromise. It doesn't - * change the lookup speed. - * - * `Key` and `T` must be nothrow move constructible and/or copy constructible. - * - * If the destructor of `Key` or `T` throws an exception, the behaviour of the - * class is undefined. - * - * Iterators invalidation: - * - clear, operator=, reserve, rehash: always invalidate the iterators. - * - insert, emplace, emplace_hint, operator[]: if there is an effective - * insert, invalidate the iterators. - * - erase: always invalidate the iterators. - */ -template , - class KeyEqual = std::equal_to, - class Allocator = std::allocator>, - class GrowthPolicy = dice::sparse_map::sh::power_of_two_growth_policy<2>, - dice::sparse_map::sh::exception_safety ExceptionSafety = - dice::sparse_map::sh::exception_safety::basic, - dice::sparse_map::sh::sparsity Sparsity = dice::sparse_map::sh::sparsity::medium> -class sparse_map { - private: - template - using has_is_transparent = dice::sparse_map::detail_sparse_hash::has_is_transparent; - - class KeySelect { - public: - using key_type = Key; - - const key_type &operator()( - const std::pair &key_value) const noexcept { - return key_value.first; - } - - key_type &operator()(std::pair &key_value) noexcept { - return key_value.first; - } - }; - - class ValueSelect { - public: - using value_type = T; - - const value_type &operator()( - const std::pair &key_value) const noexcept { - return key_value.second; - } - - value_type &operator()(std::pair &key_value) noexcept { - return key_value.second; - } - }; - - using ht = detail_sparse_hash::sparse_hash< - std::pair, KeySelect, ValueSelect, Hash, KeyEqual, Allocator, - GrowthPolicy, ExceptionSafety, Sparsity, dice::sparse_map::sh::probing::quadratic>; - - public: - using key_type = typename ht::key_type; - using mapped_type = T; - using value_type = typename ht::value_type; - using size_type = typename ht::size_type; - using difference_type = typename ht::difference_type; - using hasher = typename ht::hasher; - using key_equal = typename ht::key_equal; - using allocator_type = typename ht::allocator_type; - using reference = typename ht::reference; - using const_reference = typename ht::const_reference; - using pointer = typename ht::pointer; - using const_pointer = typename ht::const_pointer; - using iterator = typename ht::iterator; - using const_iterator = typename ht::const_iterator; - - public: - /* - * Constructors - */ - sparse_map() : sparse_map(ht::DEFAULT_INIT_BUCKET_COUNT) {} - - explicit sparse_map(size_type bucket_count, const Hash &hash = Hash(), - const KeyEqual &equal = KeyEqual(), - const Allocator &alloc = Allocator()) - : m_ht(bucket_count, hash, equal, alloc, ht::DEFAULT_MAX_LOAD_FACTOR) {} - - sparse_map(size_type bucket_count, const Allocator &alloc) - : sparse_map(bucket_count, Hash(), KeyEqual(), alloc) {} - - sparse_map(size_type bucket_count, const Hash &hash, const Allocator &alloc) - : sparse_map(bucket_count, hash, KeyEqual(), alloc) {} - - explicit sparse_map(const Allocator &alloc) - : sparse_map(ht::DEFAULT_INIT_BUCKET_COUNT, alloc) {} - 
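  /*
   * Illustrative usage sketch (not part of the original interface docs); it
   * relies only on members declared in this header:
   *
   *     dice::sparse_map::sparse_map<std::string, int> word_count;
   *     word_count["hello"] += 1;              // insert or update through operator[]
   *     word_count.insert({"world", 2});       // insert a key-value pair
   *     auto it = word_count.find("hello");
   *     if (it != word_count.end()) {
   *       it.value() += 1;                     // mutable access to the mapped value
   *     }
   *     word_count.erase("world");
   */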
- template - sparse_map(InputIt first, InputIt last, - size_type bucket_count = ht::DEFAULT_INIT_BUCKET_COUNT, - const Hash &hash = Hash(), const KeyEqual &equal = KeyEqual(), - const Allocator &alloc = Allocator()) - : sparse_map(bucket_count, hash, equal, alloc) { - insert(first, last); - } - - template - sparse_map(InputIt first, InputIt last, size_type bucket_count, - const Allocator &alloc) - : sparse_map(first, last, bucket_count, Hash(), KeyEqual(), alloc) {} - - template - sparse_map(InputIt first, InputIt last, size_type bucket_count, - const Hash &hash, const Allocator &alloc) - : sparse_map(first, last, bucket_count, hash, KeyEqual(), alloc) {} - - sparse_map(std::initializer_list init, - size_type bucket_count = ht::DEFAULT_INIT_BUCKET_COUNT, - const Hash &hash = Hash(), const KeyEqual &equal = KeyEqual(), - const Allocator &alloc = Allocator()) - : sparse_map(init.begin(), init.end(), bucket_count, hash, equal, alloc) { - } - - sparse_map(std::initializer_list init, size_type bucket_count, - const Allocator &alloc) - : sparse_map(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), - alloc) {} - - sparse_map(std::initializer_list init, size_type bucket_count, - const Hash &hash, const Allocator &alloc) - : sparse_map(init.begin(), init.end(), bucket_count, hash, KeyEqual(), - alloc) {} - - sparse_map &operator=(std::initializer_list ilist) { - m_ht.clear(); - - m_ht.reserve(ilist.size()); - m_ht.insert(ilist.begin(), ilist.end()); - - return *this; - } - - allocator_type get_allocator() const { return m_ht.get_allocator(); } - - /* - * Iterators - */ - iterator begin() noexcept { return m_ht.begin(); } - const_iterator begin() const noexcept { return m_ht.begin(); } - const_iterator cbegin() const noexcept { return m_ht.cbegin(); } - - iterator end() noexcept { return m_ht.end(); } - const_iterator end() const noexcept { return m_ht.end(); } - const_iterator cend() const noexcept { return m_ht.cend(); } - - /* - * Capacity - */ - bool empty() const noexcept { return m_ht.empty(); } - size_type size() const noexcept { return m_ht.size(); } - size_type max_size() const noexcept { return m_ht.max_size(); } - - /* - * Modifiers - */ - void clear() noexcept { m_ht.clear(); } - - std::pair insert(const value_type &value) { - return m_ht.insert(value); - } - - template ::value>::type * = nullptr> - std::pair insert(P &&value) { - return m_ht.emplace(std::forward
<P>
(value)); - } - - std::pair insert(value_type &&value) { - return m_ht.insert(std::move(value)); - } - - iterator insert(const_iterator hint, const value_type &value) { - return m_ht.insert_hint(hint, value); - } - - template ::value>::type * = nullptr> - iterator insert(const_iterator hint, P &&value) { - return m_ht.emplace_hint(hint, std::forward
<P>
(value)); - } - - iterator insert(const_iterator hint, value_type &&value) { - return m_ht.insert_hint(hint, std::move(value)); - } - - template - void insert(InputIt first, InputIt last) { - m_ht.insert(first, last); - } - - void insert(std::initializer_list ilist) { - m_ht.insert(ilist.begin(), ilist.end()); - } - - template - std::pair insert_or_assign(const key_type &k, M &&obj) { - return m_ht.insert_or_assign(k, std::forward(obj)); - } - - template - std::pair insert_or_assign(key_type &&k, M &&obj) { - return m_ht.insert_or_assign(std::move(k), std::forward(obj)); - } - - template - iterator insert_or_assign(const_iterator hint, const key_type &k, M &&obj) { - return m_ht.insert_or_assign(hint, k, std::forward(obj)); - } - - template - iterator insert_or_assign(const_iterator hint, key_type &&k, M &&obj) { - return m_ht.insert_or_assign(hint, std::move(k), std::forward(obj)); - } - - /** - * Due to the way elements are stored, emplace will need to move or copy the - * key-value once. The method is equivalent to - * `insert(value_type(std::forward(args)...));`. - * - * Mainly here for compatibility with the `std::unordered_map` interface. - */ - template - std::pair emplace(Args &&...args) { - return m_ht.emplace(std::forward(args)...); - } - - /** - * Due to the way elements are stored, emplace_hint will need to move or copy - * the key-value once. The method is equivalent to `insert(hint, - * value_type(std::forward(args)...));`. - * - * Mainly here for compatibility with the `std::unordered_map` interface. - */ - template - iterator emplace_hint(const_iterator hint, Args &&...args) { - return m_ht.emplace_hint(hint, std::forward(args)...); - } - - template - std::pair try_emplace(const key_type &k, Args &&...args) { - return m_ht.try_emplace(k, std::forward(args)...); - } - - template - std::pair try_emplace(key_type &&k, Args &&...args) { - return m_ht.try_emplace(std::move(k), std::forward(args)...); - } - - template - iterator try_emplace(const_iterator hint, const key_type &k, Args &&...args) { - return m_ht.try_emplace_hint(hint, k, std::forward(args)...); - } - - template - iterator try_emplace(const_iterator hint, key_type &&k, Args &&...args) { - return m_ht.try_emplace_hint(hint, std::move(k), - std::forward(args)...); - } - - iterator erase(iterator pos) { return m_ht.erase(pos); } - iterator erase(const_iterator pos) { return m_ht.erase(pos); } - iterator erase(const_iterator first, const_iterator last) { - return m_ht.erase(first, last); - } - size_type erase(const key_type &key) { return m_ht.erase(key); } - - /** - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - size_type erase(const key_type &key, std::size_t precalculated_hash) { - return m_ht.erase(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef - * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and - * comparable to `Key`. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - size_type erase(const K &key) { - return m_ht.erase(key); - } - - /** - * @copydoc erase(const K& key) - * - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. 
Useful to speed-up the lookup if you already have - * the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - size_type erase(const K &key, std::size_t precalculated_hash) { - return m_ht.erase(key, precalculated_hash); - } - - void swap(sparse_map &other) { other.m_ht.swap(m_ht); } - - /* - * Lookup - */ - T &at(const Key &key) { return m_ht.at(key); } - - /** - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - T &at(const Key &key, std::size_t precalculated_hash) { - return m_ht.at(key, precalculated_hash); - } - - const T &at(const Key &key) const { return m_ht.at(key); } - - /** - * @copydoc at(const Key& key, std::size_t precalculated_hash) - */ - const T &at(const Key &key, std::size_t precalculated_hash) const { - return m_ht.at(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef - * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and - * comparable to `Key`. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - T &at(const K &key) { - return m_ht.at(key); - } - - /** - * @copydoc at(const K& key) - * - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - T &at(const K &key, std::size_t precalculated_hash) { - return m_ht.at(key, precalculated_hash); - } - - /** - * @copydoc at(const K& key) - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - const T &at(const K &key) const { - return m_ht.at(key); - } - - /** - * @copydoc at(const K& key, std::size_t precalculated_hash) - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - const T &at(const K &key, std::size_t precalculated_hash) const { - return m_ht.at(key, precalculated_hash); - } - - T &operator[](const Key &key) { return m_ht[key]; } - T &operator[](Key &&key) { return m_ht[std::move(key)]; } - - size_type count(const Key &key) const { return m_ht.count(key); } - - /** - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - size_type count(const Key &key, std::size_t precalculated_hash) const { - return m_ht.count(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef - * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and - * comparable to `Key`. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - size_type count(const K &key) const { - return m_ht.count(key); - } - - /** - * @copydoc count(const K& key) const - * - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. 
Useful to speed-up the lookup if you already have - * the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - size_type count(const K &key, std::size_t precalculated_hash) const { - return m_ht.count(key, precalculated_hash); - } - - iterator find(const Key &key) { return m_ht.find(key); } - - /** - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - iterator find(const Key &key, std::size_t precalculated_hash) { - return m_ht.find(key, precalculated_hash); - } - - const_iterator find(const Key &key) const { return m_ht.find(key); } - - /** - * @copydoc find(const Key& key, std::size_t precalculated_hash) - */ - const_iterator find(const Key &key, std::size_t precalculated_hash) const { - return m_ht.find(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef - * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and - * comparable to `Key`. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - iterator find(const K &key) { - return m_ht.find(key); - } - - /** - * @copydoc find(const K& key) - * - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - iterator find(const K &key, std::size_t precalculated_hash) { - return m_ht.find(key, precalculated_hash); - } - - /** - * @copydoc find(const K& key) - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - const_iterator find(const K &key) const { - return m_ht.find(key); - } - - /** - * @copydoc find(const K& key) - * - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - const_iterator find(const K &key, std::size_t precalculated_hash) const { - return m_ht.find(key, precalculated_hash); - } - - bool contains(const Key &key) const { return m_ht.contains(key); } - - /** - * Use the hash value 'precalculated_hash' instead of hashing the key. The - * hash value should be the same as hash_function()(key). Useful to speed-up - * the lookup if you already have the hash. - */ - bool contains(const Key &key, std::size_t precalculated_hash) const { - return m_ht.contains(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef - * KeyEqual::is_transparent exists. If so, K must be hashable and comparable - * to Key. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - bool contains(const K &key) const { - return m_ht.contains(key); - } - - /** - * @copydoc contains(const K& key) const - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The - * hash value should be the same as hash_function()(key). 
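 * A minimal sketch of the intended pattern (`map` and `key` are assumed
 * names):
 *
 *     const std::size_t h = map.hash_function()(key);
 *     if (map.contains(key, h)) {
 *       map.erase(key, h);  // the same hash can be reused for further calls
 *     }
 *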
Useful to speed-up - * the lookup if you already have the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - bool contains(const K &key, std::size_t precalculated_hash) const { - return m_ht.contains(key, precalculated_hash); - } - - std::pair equal_range(const Key &key) { - return m_ht.equal_range(key); - } - - /** - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - std::pair equal_range(const Key &key, - std::size_t precalculated_hash) { - return m_ht.equal_range(key, precalculated_hash); - } - - std::pair equal_range(const Key &key) const { - return m_ht.equal_range(key); - } - - /** - * @copydoc equal_range(const Key& key, std::size_t precalculated_hash) - */ - std::pair equal_range( - const Key &key, std::size_t precalculated_hash) const { - return m_ht.equal_range(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef - * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and - * comparable to `Key`. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - std::pair equal_range(const K &key) { - return m_ht.equal_range(key); - } - - /** - * @copydoc equal_range(const K& key) - * - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - std::pair equal_range(const K &key, - std::size_t precalculated_hash) { - return m_ht.equal_range(key, precalculated_hash); - } - - /** - * @copydoc equal_range(const K& key) - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - std::pair equal_range(const K &key) const { - return m_ht.equal_range(key); - } - - /** - * @copydoc equal_range(const K& key, std::size_t precalculated_hash) - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - std::pair equal_range( - const K &key, std::size_t precalculated_hash) const { - return m_ht.equal_range(key, precalculated_hash); - } - - /* - * Bucket interface - */ - size_type bucket_count() const { return m_ht.bucket_count(); } - size_type max_bucket_count() const { return m_ht.max_bucket_count(); } - - /* - * Hash policy - */ - float load_factor() const { return m_ht.load_factor(); } - float max_load_factor() const { return m_ht.max_load_factor(); } - void max_load_factor(float ml) { m_ht.max_load_factor(ml); } - - void rehash(size_type count) { m_ht.rehash(count); } - void reserve(size_type count) { m_ht.reserve(count); } - - /* - * Observers - */ - hasher hash_function() const { return m_ht.hash_function(); } - key_equal key_eq() const { return m_ht.key_eq(); } - - /* - * Other - */ - - /** - * Convert a `const_iterator` to an `iterator`. - */ - iterator mutable_iterator(const_iterator pos) { - return m_ht.mutable_iterator(pos); - } - - /** - * Serialize the map through the `serializer` parameter. 
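 * A rough sketch of such a function object (illustrative only; it assumes the
 * serialized types are trivially copyable and writes raw bytes to a
 * `std::ostream`):
 *
 *     struct ostream_serializer {
 *       std::ostream &os;
 *       template <class U>
 *       void operator()(const U &value) {
 *         os.write(reinterpret_cast<const char *>(&value), sizeof(value));
 *       }
 *     };
 *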
- * - * The `serializer` parameter must be a function object that supports the - * following call: - * - `template void operator()(const U& value);` where the types - * `std::uint64_t`, `float` and `std::pair` must be supported for U. - * - * The implementation leaves binary compatibility (endianness, IEEE 754 for - * floats, ...) of the types it serializes in the hands of the `Serializer` - * function object if compatibility is required. - */ - template - void serialize(Serializer &serializer) const { - m_ht.serialize(serializer); - } - - /** - * Deserialize a previously serialized map through the `deserializer` - * parameter. - * - * The `deserializer` parameter must be a function object that supports the - * following calls: - * - `template U operator()();` where the types `std::uint64_t`, - * `float` and `std::pair` must be supported for U. - * - * If the deserialized hash map type is hash compatible with the serialized - * map, the deserialization process can be sped up by setting - * `hash_compatible` to true. To be hash compatible, the Hash, KeyEqual and - * GrowthPolicy must behave the same way than the ones used on the serialized - * map. The `std::size_t` must also be of the same size as the one on the - * platform used to serialize the map. If these criteria are not met, the - * behaviour is undefined with `hash_compatible` sets to true. - * - * The behaviour is undefined if the type `Key` and `T` of the `sparse_map` - * are not the same as the types used during serialization. - * - * The implementation leaves binary compatibility (endianness, IEEE 754 for - * floats, size of int, ...) of the types it deserializes in the hands of the - * `Deserializer` function object if compatibility is required. - */ - template - static sparse_map deserialize(Deserializer &deserializer, - bool hash_compatible = false) { - sparse_map map(0); - map.m_ht.deserialize(deserializer, hash_compatible); - - return map; - } - - friend bool operator==(const sparse_map &lhs, const sparse_map &rhs) { - if (lhs.size() != rhs.size()) { - return false; - } - - for (const auto &element_lhs : lhs) { - const auto it_element_rhs = rhs.find(element_lhs.first); - if (it_element_rhs == rhs.cend() || - element_lhs.second != it_element_rhs->second) { - return false; - } - } - - return true; - } - - friend bool operator!=(const sparse_map &lhs, const sparse_map &rhs) { - return !operator==(lhs, rhs); - } - - friend void swap(sparse_map &lhs, sparse_map &rhs) { lhs.swap(rhs); } - - private: - ht m_ht; -}; - -/** - * Same as `dice::sparse_map`. 
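// Editorial sketch (not part of the patch): a minimal Serializer/Deserializer
// pair matching the contract described above. Only arithmetic types and
// std::pair of arithmetic types are handled; endianness and IEEE-754
// portability are deliberately ignored, as the docs leave them to the function
// objects. The file name and the usage types at the end are assumptions.
#include <fstream>
#include <type_traits>
#include <utility>

struct file_serializer {
    std::ofstream out{"map.bin", std::ios::binary};

    template <class U>
    void operator()(const U &value) {
        if constexpr (std::is_arithmetic_v<U>) {
            out.write(reinterpret_cast<const char *>(&value), sizeof(U));
        } else {  // assumed: U is std::pair<Key, T>
            (*this)(value.first);
            (*this)(value.second);
        }
    }
};

struct file_deserializer {
    std::ifstream in{"map.bin", std::ios::binary};

    template <class U>
    U operator()() {
        if constexpr (std::is_arithmetic_v<U>) {
            U value{};
            in.read(reinterpret_cast<char *>(&value), sizeof(U));
            return value;
        } else {  // assumed: U is std::pair<Key, T>; read in the order written
            auto first  = this->template operator()<typename U::first_type>();
            auto second = this->template operator()<typename U::second_type>();
            return U{first, second};
        }
    }
};

// Possible usage (types assumed):
//   dice::sparse_map::sparse_map<std::uint64_t, float> m = /*...*/;
//   file_serializer ser;    m.serialize(ser);
//   file_deserializer des;  auto copy =
//       dice::sparse_map::sparse_map<std::uint64_t, float>::deserialize(des);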
- */ -template , - class KeyEqual = std::equal_to, - class Allocator = std::allocator>> -using sparse_pg_map = - sparse_map; - -} // end namespace dice - -#endif diff --git a/include/dice/sparse-map/sparse_set.hpp b/include/dice/sparse-map/sparse_set.hpp deleted file mode 100644 index d0e0f0b..0000000 --- a/include/dice/sparse-map/sparse_set.hpp +++ /dev/null @@ -1,656 +0,0 @@ -/** - * MIT License - * - * Copyright (c) 2017 Thibaut Goetghebuer-Planchon - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef DICE_SPARSE_MAP_SPARSE_SET_HPP -#define DICE_SPARSE_MAP_SPARSE_SET_HPP - -#include -#include -#include -#include -#include -#include - -#include "dice/sparse-map/sparse_hash.hpp" -#include "dice/sparse-map/boost_offset_pointer.hpp" - -namespace dice::sparse_map { - -/** - * Implementation of a sparse hash set using open-addressing with quadratic - * probing. The goal on the hash set is to be the most memory efficient - * possible, even at low load factor, while keeping reasonable performances. - * - * `GrowthPolicy` defines how the set grows and consequently how a hash value is - * mapped to a bucket. By default the set uses - * `dice::sh::power_of_two_growth_policy`. This policy keeps the number of - * buckets to a power of two and uses a mask to map the hash to a bucket instead - * of the slow modulo. Other growth policies are available and you may define - * your own growth policy, check `dice::sh::power_of_two_growth_policy` for the - * interface. - * - * `ExceptionSafety` defines the exception guarantee provided by the class. By - * default only the basic exception safety is guaranteed which mean that all - * resources used by the hash set will be freed (no memory leaks) but the hash - * set may end-up in an undefined state if an exception is thrown (undefined - * here means that some elements may be missing). This can ONLY happen on rehash - * (either on insert or if `rehash` is called explicitly) and will occur if the - * Allocator can't allocate memory (`std::bad_alloc`) or if the copy constructor - * (when a nothrow move constructor is not available) throws an exception. This - * can be avoided by calling `reserve` beforehand. This basic guarantee is - * similar to the one of `google::sparse_hash_map` and `spp::sparse_hash_map`. - * It is possible to ask for the strong exception guarantee with - * `dice::sh::exception_safety::strong`, the drawback is that the set will be - * slower on rehashes and will also need more memory on rehashes. 
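// Editorial sketch (not part of the patch): opting into the strong exception
// guarantee mentioned above on the (pre-change) sparse_set. Every defaulted
// parameter before ExceptionSafety has to be spelled out; the parameter order
// mirrors the template declaration just below, and the include path and the
// exact policy/enum names are assumptions reconstructed from that declaration.
#include <cstdint>
#include <functional>
#include <memory>

#include <dice/sparse-map/sparse_set.hpp>

using strong_set = dice::sparse_map::sparse_set<
    std::uint64_t,
    std::hash<std::uint64_t>,
    std::equal_to<std::uint64_t>,
    std::allocator<std::uint64_t>,
    dice::sparse_map::sh::power_of_two_growth_policy<2>,
    dice::sparse_map::sh::exception_safety::strong>;

int main() {
    strong_set s;
    s.reserve(1'000);  // reserving up front also sidesteps rehashes entirely
    for (std::uint64_t i = 0; i < 1'000; ++i) {
        s.insert(i);
    }
}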
- * - * `Sparsity` defines how much the hash set will compromise between insertion - * speed and memory usage. A high sparsity means less memory usage but longer - * insertion times, and vice-versa for low sparsity. The default - * `dice::sh::sparsity::medium` sparsity offers a good compromise. It doesn't - * change the lookup speed. - * - * `Key` must be nothrow move constructible and/or copy constructible. - * - * If the destructor of `Key` throws an exception, the behaviour of the class is - * undefined. - * - * Iterators invalidation: - * - clear, operator=, reserve, rehash: always invalidate the iterators. - * - insert, emplace, emplace_hint: if there is an effective insert, invalidate - * the iterators. - * - erase: always invalidate the iterators. - */ -template , - class KeyEqual = std::equal_to, - class Allocator = std::allocator, - class GrowthPolicy = dice::sparse_map::sh::power_of_two_growth_policy<2>, - dice::sparse_map::sh::exception_safety ExceptionSafety = - dice::sparse_map::sh::exception_safety::basic, - dice::sparse_map::sh::sparsity Sparsity = dice::sparse_map::sh::sparsity::medium> -class sparse_set { - private: - template - using has_is_transparent = dice::sparse_map::detail_sparse_hash::has_is_transparent; - - class KeySelect { - public: - using key_type = Key; - - const key_type &operator()(const Key &key) const noexcept { return key; } - - key_type &operator()(Key &key) noexcept { return key; } - }; - - using ht = - detail_sparse_hash::sparse_hash; - - public: - using key_type = typename ht::key_type; - using value_type = typename ht::value_type; - using size_type = typename ht::size_type; - using difference_type = typename ht::difference_type; - using hasher = typename ht::hasher; - using key_equal = typename ht::key_equal; - using allocator_type = typename ht::allocator_type; - using reference = typename ht::reference; - using const_reference = typename ht::const_reference; - using pointer = typename ht::pointer; - using const_pointer = typename ht::const_pointer; - using iterator = typename ht::iterator; - using const_iterator = typename ht::const_iterator; - - /* - * Constructors - */ - sparse_set() : sparse_set(ht::DEFAULT_INIT_BUCKET_COUNT) {} - - explicit sparse_set(size_type bucket_count, const Hash &hash = Hash(), - const KeyEqual &equal = KeyEqual(), - const Allocator &alloc = Allocator()) - : m_ht(bucket_count, hash, equal, alloc, ht::DEFAULT_MAX_LOAD_FACTOR) {} - - sparse_set(size_type bucket_count, const Allocator &alloc) - : sparse_set(bucket_count, Hash(), KeyEqual(), alloc) {} - - sparse_set(size_type bucket_count, const Hash &hash, const Allocator &alloc) - : sparse_set(bucket_count, hash, KeyEqual(), alloc) {} - - explicit sparse_set(const Allocator &alloc) - : sparse_set(ht::DEFAULT_INIT_BUCKET_COUNT, alloc) {} - - template - sparse_set(InputIt first, InputIt last, - size_type bucket_count = ht::DEFAULT_INIT_BUCKET_COUNT, - const Hash &hash = Hash(), const KeyEqual &equal = KeyEqual(), - const Allocator &alloc = Allocator()) - : sparse_set(bucket_count, hash, equal, alloc) { - insert(first, last); - } - - template - sparse_set(InputIt first, InputIt last, size_type bucket_count, - const Allocator &alloc) - : sparse_set(first, last, bucket_count, Hash(), KeyEqual(), alloc) {} - - template - sparse_set(InputIt first, InputIt last, size_type bucket_count, - const Hash &hash, const Allocator &alloc) - : sparse_set(first, last, bucket_count, hash, KeyEqual(), alloc) {} - - sparse_set(std::initializer_list init, - size_type bucket_count = 
ht::DEFAULT_INIT_BUCKET_COUNT, - const Hash &hash = Hash(), const KeyEqual &equal = KeyEqual(), - const Allocator &alloc = Allocator()) - : sparse_set(init.begin(), init.end(), bucket_count, hash, equal, alloc) { - } - - sparse_set(std::initializer_list init, size_type bucket_count, - const Allocator &alloc) - : sparse_set(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), - alloc) {} - - sparse_set(std::initializer_list init, size_type bucket_count, - const Hash &hash, const Allocator &alloc) - : sparse_set(init.begin(), init.end(), bucket_count, hash, KeyEqual(), - alloc) {} - - sparse_set &operator=(std::initializer_list ilist) { - m_ht.clear(); - - m_ht.reserve(ilist.size()); - m_ht.insert(ilist.begin(), ilist.end()); - - return *this; - } - - allocator_type get_allocator() const { return m_ht.get_allocator(); } - - /* - * Iterators - */ - iterator begin() noexcept { return m_ht.begin(); } - const_iterator begin() const noexcept { return m_ht.begin(); } - const_iterator cbegin() const noexcept { return m_ht.cbegin(); } - - iterator end() noexcept { return m_ht.end(); } - const_iterator end() const noexcept { return m_ht.end(); } - const_iterator cend() const noexcept { return m_ht.cend(); } - - /* - * Capacity - */ - bool empty() const noexcept { return m_ht.empty(); } - size_type size() const noexcept { return m_ht.size(); } - size_type max_size() const noexcept { return m_ht.max_size(); } - - /* - * Modifiers - */ - void clear() noexcept { m_ht.clear(); } - - std::pair insert(const value_type &value) { - return m_ht.insert(value); - } - - std::pair insert(value_type &&value) { - return m_ht.insert(std::move(value)); - } - - iterator insert(const_iterator hint, const value_type &value) { - return m_ht.insert_hint(hint, value); - } - - iterator insert(const_iterator hint, value_type &&value) { - return m_ht.insert_hint(hint, std::move(value)); - } - - template - void insert(InputIt first, InputIt last) { - m_ht.insert(first, last); - } - - void insert(std::initializer_list ilist) { - m_ht.insert(ilist.begin(), ilist.end()); - } - - /** - * Due to the way elements are stored, emplace will need to move or copy the - * key-value once. The method is equivalent to - * `insert(value_type(std::forward(args)...));`. - * - * Mainly here for compatibility with the `std::unordered_map` interface. - */ - template - std::pair emplace(Args &&...args) { - return m_ht.emplace(std::forward(args)...); - } - - /** - * Due to the way elements are stored, emplace_hint will need to move or copy - * the key-value once. The method is equivalent to `insert(hint, - * value_type(std::forward(args)...));`. - * - * Mainly here for compatibility with the `std::unordered_map` interface. - */ - template - iterator emplace_hint(const_iterator hint, Args &&...args) { - return m_ht.emplace_hint(hint, std::forward(args)...); - } - - iterator erase(iterator pos) { return m_ht.erase(pos); } - iterator erase(const_iterator pos) { return m_ht.erase(pos); } - iterator erase(const_iterator first, const_iterator last) { - return m_ht.erase(first, last); - } - size_type erase(const key_type &key) { return m_ht.erase(key); } - - /** - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. 
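// Editorial sketch (not part of the patch): the same precalculated-hash idea for
// a set, pairing a contains() check with an erase() without hashing the key
// twice. Element type and include path are assumptions.
#include <cstddef>
#include <cstdint>

#include <dice/sparse-map/sparse_set.hpp>

int main() {
    dice::sparse_map::sparse_set<std::uint64_t> ids{1, 2, 3};

    const std::uint64_t key = 2;
    const std::size_t hash = ids.hash_function()(key);

    if (ids.contains(key, hash)) {
        ids.erase(key, hash);  // reuses the hash computed above
    }
}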
- */ - size_type erase(const key_type &key, std::size_t precalculated_hash) { - return m_ht.erase(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef - * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and - * comparable to `Key`. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - size_type erase(const K &key) { - return m_ht.erase(key); - } - - /** - * @copydoc erase(const K& key) - * - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - size_type erase(const K &key, std::size_t precalculated_hash) { - return m_ht.erase(key, precalculated_hash); - } - - void swap(sparse_set &other) { other.m_ht.swap(m_ht); } - - /* - * Lookup - */ - size_type count(const Key &key) const { return m_ht.count(key); } - - /** - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - size_type count(const Key &key, std::size_t precalculated_hash) const { - return m_ht.count(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef - * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and - * comparable to `Key`. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - size_type count(const K &key) const { - return m_ht.count(key); - } - - /** - * @copydoc count(const K& key) const - * - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - size_type count(const K &key, std::size_t precalculated_hash) const { - return m_ht.count(key, precalculated_hash); - } - - iterator find(const Key &key) { return m_ht.find(key); } - - /** - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - iterator find(const Key &key, std::size_t precalculated_hash) { - return m_ht.find(key, precalculated_hash); - } - - const_iterator find(const Key &key) const { return m_ht.find(key); } - - /** - * @copydoc find(const Key& key, std::size_t precalculated_hash) - */ - const_iterator find(const Key &key, std::size_t precalculated_hash) const { - return m_ht.find(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef - * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and - * comparable to `Key`. 
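// Editorial sketch (not part of the patch): heterogeneous lookup as described
// above. string_hash is an assumed helper; the documented requirements are that
// KeyEqual exposes `is_transparent` (std::equal_to<> does) and that Hash and
// KeyEqual accept the probe type K as well as Key.
#include <cstddef>
#include <functional>
#include <string>
#include <string_view>

#include <dice/sparse-map/sparse_set.hpp>

struct string_hash {
    std::size_t operator()(std::string_view sv) const noexcept {
        return std::hash<std::string_view>{}(sv);
    }
};

int main() {
    dice::sparse_map::sparse_set<std::string, string_hash, std::equal_to<>> names;
    names.emplace("alice");

    // The transparent overloads avoid building a temporary std::string:
    bool found = names.contains(std::string_view{"alice"});
    auto it = names.find("bob");  // a const char* probe works as well
    (void)found;
    (void)it;
}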
- */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - iterator find(const K &key) { - return m_ht.find(key); - } - - /** - * @copydoc find(const K& key) - * - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - iterator find(const K &key, std::size_t precalculated_hash) { - return m_ht.find(key, precalculated_hash); - } - - /** - * @copydoc find(const K& key) - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - const_iterator find(const K &key) const { - return m_ht.find(key); - } - - /** - * @copydoc find(const K& key) - * - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - const_iterator find(const K &key, std::size_t precalculated_hash) const { - return m_ht.find(key, precalculated_hash); - } - - bool contains(const Key &key) const { return m_ht.contains(key); } - - /** - * Use the hash value 'precalculated_hash' instead of hashing the key. The - * hash value should be the same as hash_function()(key). Useful to speed-up - * the lookup if you already have the hash. - */ - bool contains(const Key &key, std::size_t precalculated_hash) const { - return m_ht.contains(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef - * KeyEqual::is_transparent exists. If so, K must be hashable and comparable - * to Key. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - bool contains(const K &key) const { - return m_ht.contains(key); - } - - /** - * @copydoc contains(const K& key) const - * - * Use the hash value 'precalculated_hash' instead of hashing the key. The - * hash value should be the same as hash_function()(key). Useful to speed-up - * the lookup if you already have the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - bool contains(const K &key, std::size_t precalculated_hash) const { - return m_ht.contains(key, precalculated_hash); - } - - std::pair equal_range(const Key &key) { - return m_ht.equal_range(key); - } - - /** - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - std::pair equal_range(const Key &key, - std::size_t precalculated_hash) { - return m_ht.equal_range(key, precalculated_hash); - } - - std::pair equal_range(const Key &key) const { - return m_ht.equal_range(key); - } - - /** - * @copydoc equal_range(const Key& key, std::size_t precalculated_hash) - */ - std::pair equal_range( - const Key &key, std::size_t precalculated_hash) const { - return m_ht.equal_range(key, precalculated_hash); - } - - /** - * This overload only participates in the overload resolution if the typedef - * `KeyEqual::is_transparent` exists. 
If so, `K` must be hashable and - * comparable to `Key`. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - std::pair equal_range(const K &key) { - return m_ht.equal_range(key); - } - - /** - * @copydoc equal_range(const K& key) - * - * Use the hash value `precalculated_hash` instead of hashing the key. The - * hash value should be the same as `hash_function()(key)`, otherwise the - * behaviour is undefined. Useful to speed-up the lookup if you already have - * the hash. - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - std::pair equal_range(const K &key, - std::size_t precalculated_hash) { - return m_ht.equal_range(key, precalculated_hash); - } - - /** - * @copydoc equal_range(const K& key) - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - std::pair equal_range(const K &key) const { - return m_ht.equal_range(key); - } - - /** - * @copydoc equal_range(const K& key, std::size_t precalculated_hash) - */ - template < - class K, class KE = KeyEqual, - typename std::enable_if::value>::type * = nullptr> - std::pair equal_range( - const K &key, std::size_t precalculated_hash) const { - return m_ht.equal_range(key, precalculated_hash); - } - - /* - * Bucket interface - */ - size_type bucket_count() const { return m_ht.bucket_count(); } - size_type max_bucket_count() const { return m_ht.max_bucket_count(); } - - /* - * Hash policy - */ - float load_factor() const { return m_ht.load_factor(); } - float max_load_factor() const { return m_ht.max_load_factor(); } - void max_load_factor(float ml) { m_ht.max_load_factor(ml); } - - void rehash(size_type count) { m_ht.rehash(count); } - void reserve(size_type count) { m_ht.reserve(count); } - - /* - * Observers - */ - hasher hash_function() const { return m_ht.hash_function(); } - key_equal key_eq() const { return m_ht.key_eq(); } - - /* - * Other - */ - - /** - * Convert a `const_iterator` to an `iterator`. - */ - iterator mutable_iterator(const_iterator pos) { - return m_ht.mutable_iterator(pos); - } - - /** - * Serialize the set through the `serializer` parameter. - * - * The `serializer` parameter must be a function object that supports the - * following call: - * - `void operator()(const U& value);` where the types `std::uint64_t`, - * `float` and `Key` must be supported for U. - * - * The implementation leaves binary compatibility (endianness, IEEE 754 for - * floats, ...) of the types it serializes in the hands of the `Serializer` - * function object if compatibility is required. - */ - template - void serialize(Serializer &serializer) const { - m_ht.serialize(serializer); - } - - /** - * Deserialize a previously serialized set through the `deserializer` - * parameter. - * - * The `deserializer` parameter must be a function object that supports the - * following calls: - * - `template U operator()();` where the types `std::uint64_t`, - * `float` and `Key` must be supported for U. - * - * If the deserialized hash set type is hash compatible with the serialized - * set, the deserialization process can be sped up by setting - * `hash_compatible` to true. To be hash compatible, the Hash, KeyEqual and - * GrowthPolicy must behave the same way than the ones used on the serialized - * set. The `std::size_t` must also be of the same size as the one on the - * platform used to serialize the set. If these criteria are not met, the - * behaviour is undefined with `hash_compatible` sets to true. 
- * - * The behaviour is undefined if the type `Key` of the `sparse_set` is not the - * same as the type used during serialization. - * - * The implementation leaves binary compatibility (endianness, IEEE 754 for - * floats, size of int, ...) of the types it deserializes in the hands of the - * `Deserializer` function object if compatibility is required. - */ - template - static sparse_set deserialize(Deserializer &deserializer, - bool hash_compatible = false) { - sparse_set set(0); - set.m_ht.deserialize(deserializer, hash_compatible); - - return set; - } - - friend bool operator==(const sparse_set &lhs, const sparse_set &rhs) { - if (lhs.size() != rhs.size()) { - return false; - } - - for (const auto &element_lhs : lhs) { - const auto it_element_rhs = rhs.find(element_lhs); - if (it_element_rhs == rhs.cend()) { - return false; - } - } - - return true; - } - - friend bool operator!=(const sparse_set &lhs, const sparse_set &rhs) { - return !operator==(lhs, rhs); - } - - friend void swap(sparse_set &lhs, sparse_set &rhs) { lhs.swap(rhs); } - - private: - ht m_ht; -}; - -/** - * Same as `dice::sparse_set`. - */ -template , - class KeyEqual = std::equal_to, - class Allocator = std::allocator> -using sparse_pg_set = - sparse_set; - -} // end namespace dice - -#endif diff --git a/include/dice/sparse_map/internal/sparse_bucket.hpp b/include/dice/sparse_map/internal/sparse_bucket.hpp new file mode 100644 index 0000000..a0d5b22 --- /dev/null +++ b/include/dice/sparse_map/internal/sparse_bucket.hpp @@ -0,0 +1,581 @@ +#ifndef DICE_SPARSE_MAP_SPARSE_BUCKET_HPP +#define DICE_SPARSE_MAP_SPARSE_BUCKET_HPP + +#include +#include +#include +#include +#include +#include + +#include "../sparse_props.hpp" + +namespace dice::sparse_map::internal { + + /** + * WARNING: the sparse_bucket class doesn't free the resources allocated through + * the allocator passed in parameter in each method. You have to manually call + * `clear(Allocator&)` when you don't need a sparse_array_type object anymore. + * + * The reason is that the sparse_array_type doesn't store the allocator to avoid + * wasting space in each sparse_array_type when the allocator has a size > 0. It only + * allocates/deallocates objects with the allocator that is passed in parameter. + * + * + * + * Index denotes a value between [0, discriminant_bits), it is an index similar to + * std::vector. Offset denotes the real position in `values_` corresponding to + * an index. + * + * We are using raw pointers instead of std::vector to avoid loosing + * 2*sizeof(size_t) bytes to store the capacity and size of the vector in each + * sparse_array_type. We know we can only store up to discriminant_bits elements in the + * array, we don't need such big types. + * + * + * T must be nothrow move constructible and/or copy constructible. + * Behaviour is undefined if the destructor of T throws an exception. + * + * See https://smerity.com/articles/2015/google_sparsehash.html for details on + * the idea behinds the implementation. 
+ */ + template + struct sparse_bucket { + private: + using alloc_traits = std::allocator_traits; + + public: + using value_type = T; + using size_type = std::uint_least8_t; + using allocator_type = Allocator; + using pointer = typename alloc_traits::pointer; + using const_pointer = typename alloc_traits::const_pointer; + using iterator = pointer; + using const_iterator = const_pointer; + + static constexpr size_type capacity_growth_step = []() { + switch (Sparsity) { + case sparsity::high: return 2; + case sparsity::medium: return 4; + case sparsity::low: return 8; + } + }(); + + using discriminant_type = std::uint_least64_t; + static constexpr std::size_t discriminant_bits = 64; + + static constexpr std::size_t bucket_shift = 6; + static constexpr std::size_t bucket_mask = discriminant_bits - 1; + + static_assert(std::has_single_bit(discriminant_bits), + "discriminant_bits must be a power of two."); + static_assert(std::numeric_limits::digits >= discriminant_bits, + "discriminant_type must be able to hold at least discriminant_bits."); + static_assert((std::size_t(1) << bucket_shift) == discriminant_bits, + "(1 << bucket_shift) must be equal to discriminant_bits."); + static_assert(std::numeric_limits::max() >= discriminant_bits, + "size_type must be big enough to hold discriminant_bits."); + static_assert(std::is_unsigned::value, + "discriminant_type must be unsigned."); + static_assert((std::numeric_limits::max() & bucket_mask) == discriminant_bits - 1); + + private: + pointer values_ = nullptr; + + discriminant_type value_discriminant_ = 0; + discriminant_type deleted_discriminant_ = 0; + + size_type size_ = 0; + size_type capacity_ = 0; + + public: + /** + * Map an ibucket [0, bucket_count) in the hash table to a sparse_ibucket + * (a sparse_array_type holds multiple buckets, so there is less sparse_array_type than + * bucket_count). + * + * The bucket ibucket is in + * m_sparse_buckets[sparse_ibucket(ibucket)][index_in_sparse_bucket(ibucket)] + * instead of something like m_buckets[ibucket] in a classical hash table. + */ + [[nodiscard]] static constexpr std::size_t sparse_ibucket(std::size_t ibucket) noexcept { + return ibucket >> bucket_shift; + } + + /** + * Map an ibucket [0, bucket_count) in the hash table to an index in the + * sparse_array_type which corresponds to the bucket. + * + * The bucket ibucket is in + * m_sparse_buckets[sparse_ibucket(ibucket)][index_in_sparse_bucket(ibucket)] + * instead of something like m_buckets[ibucket] in a classical hash table. 
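// Editorial worked example (not part of the patch): how a flat bucket index is
// split by the shift/mask above. With discriminant_bits == 64, bucket_shift == 6
// and bucket_mask == 63, flat bucket 70 lives in sparse bucket 1 at bit 6. The
// helpers below are local re-statements of sparse_ibucket/index_in_sparse_bucket.
#include <cstddef>

constexpr std::size_t bucket_shift = 6;
constexpr std::size_t bucket_mask = 63;

constexpr std::size_t sparse_ibucket(std::size_t ibucket) { return ibucket >> bucket_shift; }
constexpr std::size_t index_in_sparse_bucket(std::size_t ibucket) { return ibucket & bucket_mask; }

static_assert(sparse_ibucket(70) == 1 && index_in_sparse_bucket(70) == 6);
static_assert(sparse_ibucket(63) == 0 && index_in_sparse_bucket(63) == 63);
static_assert(sparse_ibucket(64) == 1 && index_in_sparse_bucket(64) == 0);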
+ */ + [[nodiscard]] static constexpr size_type index_in_sparse_bucket(std::size_t ibucket) noexcept { + return static_cast(ibucket & bucket_mask); + } + + [[nodiscard]] static constexpr std::size_t nb_sparse_buckets(std::size_t bucket_count) noexcept { + if (bucket_count == 0) { + return 0; + } + + return std::max(1, sparse_ibucket(std::bit_ceil(bucket_count))); + } + + template + static void construct_at(allocator_type &alloc, pointer p, Args &&...args) noexcept(std::is_nothrow_constructible_v) { + alloc_traits::construct(alloc, std::to_address(p), std::forward(args)...); + } + + static void destroy_at(allocator_type &alloc, pointer p) noexcept(std::is_nothrow_destructible_v) { + alloc_traits::destroy(alloc, std::to_address(p)); + } + + public: + constexpr sparse_bucket() noexcept = default; + + sparse_bucket(sparse_bucket const &other) = delete; + sparse_bucket(sparse_bucket &&other) = delete; + sparse_bucket &operator=(sparse_bucket const &) = delete; + sparse_bucket &operator=(sparse_bucket &&) = delete; + + // The code that manages the bucket must have called clear before + // destruction. See documentation of sparse_array_type for more details. + ~sparse_bucket() noexcept = default; + + sparse_bucket(size_type capacity, allocator_type &alloc) : capacity_{capacity} { + if (capacity_ == 0) { + return; + } + + values_ = alloc_traits::allocate(alloc, capacity_); + assert(values_ != nullptr);// allocate should throw if there is a failure + } + + sparse_bucket(sparse_bucket const &other, allocator_type &alloc) : values_{nullptr}, + value_discriminant_{other.value_discriminant_}, + deleted_discriminant_{other.deleted_discriminant_}, + size_{0}, + capacity_{other.capacity_} { + + assert(other.capacity_ >= other.size_); + if (capacity_ == 0) { + return; + } + + values_ = alloc_traits::allocate(alloc, capacity_); + assert(values_ != nullptr);// allocate should throw if there is a failure + + try { + for (; size_ < other.size_; ++size_) { + construct_at(alloc, values_ + size_, other.values_[size_]); + } + } catch (...) { + clear(alloc); + throw; + } + } + + sparse_bucket(sparse_bucket &&other, allocator_type &alloc) : value_discriminant_{other.value_discriminant_}, + deleted_discriminant_{other.deleted_discriminant_}, + size_{0}, + capacity_{other.capacity_} { + // this ctor must only be called when the allocator is actually different + // cannot check if the allocators were actually different, but the static_assert helps + static_assert(!alloc_traits::is_always_equal::value); + + assert(other.capacity_ >= other.size_); + if (capacity_ == 0) { + return; + } + + values_ = alloc_traits::allocate(alloc, capacity_); + assert(values_ != nullptr); // allocate should throw if there is a failure + + if constexpr (std::is_trivially_copyable_v) { + std::memcpy(&values_[0], &other.values_[0], other.size_ * sizeof(value_type)); + size_ = other.size_; + } else if constexpr (std::is_nothrow_move_constructible_v) { + for (size_type i = 0; i < other.size_; i++) { + construct_at(alloc, &values_[i], std::move(other.values_[i])); + } + + size_ = other.size_; + } else { + try { + for (; size_ < other.size_; ++size_) { + construct_at(alloc, &values_[size_], std::move(other.values_[size_])); + } + } catch (...) 
{ + clear(alloc); + throw; + } + } + + other.clear(alloc); + } + + /** + * @safety This function is only safe to call if the underlying object is non-const + */ + static iterator unsafe_mutable_iterator(const_iterator pos) noexcept { + if constexpr (std::is_pointer_v) { + return const_cast(pos); + } else { + return iterator{const_cast(std::to_address(pos))}; + } + } + + [[nodiscard]] constexpr iterator begin() noexcept { return values_; } + [[nodiscard]] constexpr iterator end() noexcept { return values_ + size_; } + [[nodiscard]] constexpr const_iterator begin() const noexcept { return cbegin(); } + [[nodiscard]] constexpr const_iterator end() const noexcept { return cend(); } + [[nodiscard]] constexpr const_iterator cbegin() const noexcept { return values_; } + [[nodiscard]] constexpr const_iterator cend() const noexcept { return values_ + size_; } + + [[nodiscard]] constexpr bool empty() const noexcept { return size_ == 0; } + + [[nodiscard]] constexpr size_type size() const noexcept { return size_; } + + void destroy_deallocate(allocator_type &alloc) noexcept(std::is_nothrow_destructible_v) { + destroy_and_deallocate_values(alloc, values_, size_, capacity_); + } + + void clear(allocator_type &alloc) noexcept(std::is_nothrow_destructible_v) { + destroy_deallocate(alloc); + + values_ = nullptr; + value_discriminant_ = 0; + deleted_discriminant_ = 0; + size_ = 0; + capacity_ = 0; + } + + [[nodiscard]] constexpr bool has_value(size_type index) const noexcept { + assert(index < discriminant_bits); + return (value_discriminant_ & (discriminant_type{1} << index)) != 0; + } + + [[nodiscard]] constexpr bool has_deleted_value(size_type index) const noexcept { + assert(index < discriminant_bits); + return (deleted_discriminant_ & (discriminant_type{1} << index)) != 0; + } + + [[nodiscard]] iterator value(size_type index) noexcept { + assert(has_value(index)); + return values_ + index_to_offset(index); + } + + [[nodiscard]] const_iterator value(size_type index) const noexcept { + assert(has_value(index)); + return values_ + index_to_offset(index); + } + + /** + * Return iterator to set value. 
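// Editorial worked example (not part of the patch): how a value is located via
// the 64-bit bitmap. Entries are packed contiguously in values_, so the storage
// offset of index i is the popcount of the bits below i in value_discriminant_.
// The helpers below are local re-statements of has_value/index_to_offset.
#include <bit>
#include <cstdint>

constexpr bool has_value(std::uint64_t bitmap, unsigned index) {
    return (bitmap & (std::uint64_t{1} << index)) != 0;
}
constexpr int index_to_offset(std::uint64_t bitmap, unsigned index) {
    return std::popcount(bitmap & ((std::uint64_t{1} << index) - 1));
}

// bitmap 0b1011: indices 0, 1 and 3 are occupied
static_assert(has_value(0b1011, 3) && !has_value(0b1011, 2));
static_assert(index_to_offset(0b1011, 3) == 2);  // index 3 is stored in values_[2]
static_assert(index_to_offset(0b1011, 1) == 1);  // index 1 is stored in values_[1]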
+ */ + template + iterator set(allocator_type &alloc, size_type index, Args &&...value_args) { + assert(!has_value(index)); + + const size_type offset = index_to_offset(index); + insert_at_offset(alloc, offset, std::forward(value_args)...); + + value_discriminant_ |= discriminant_type{1} << index; + deleted_discriminant_ &= ~(discriminant_type{1} << index); + + size_ += 1; + + assert(has_value(index)); + assert(!has_deleted_value(index)); + + return values_ + offset; + } + + iterator erase(allocator_type &alloc, iterator position) { + auto const offset = static_cast(std::distance(begin(), position)); + return erase(alloc, position, offset_to_index(offset)); + } + + // Return the next value or end if no next value + iterator erase(allocator_type &alloc, iterator position, size_type index) { + assert(has_value(index)); + assert(!has_deleted_value(index)); + + auto const offset = static_cast(std::distance(begin(), position)); + erase_at_offset(alloc, offset); + + value_discriminant_ &= ~(discriminant_type{1} << index); + deleted_discriminant_ |= discriminant_type{1} << index; + + size_ -= 1; + + assert(!has_value(index)); + assert(has_deleted_value(index)); + + return values_ + offset; + } + + void swap(sparse_bucket &other) noexcept { + using std::swap; + + swap(values_, other.values_); + swap(value_discriminant_, other.value_discriminant_); + swap(deleted_discriminant_, other.deleted_discriminant_); + swap(size_, other.size_); + swap(capacity_, other.capacity_); + } + + private: + static void destroy_and_deallocate_values(allocator_type &alloc, + pointer values, + size_type nb_values, + size_type capacity_values) noexcept { + if constexpr (!std::is_trivially_destructible_v) { + for (size_type i = 0; i < nb_values; i++) { + destroy_at(alloc, &values[i]); + } + } + + alloc_traits::deallocate(alloc, values, capacity_values); + } + + [[nodiscard]] constexpr size_type index_to_offset(size_type index) const noexcept { + assert(index < discriminant_bits); + return std::popcount(value_discriminant_ & ((discriminant_type{1} << index) - discriminant_type{1})); + } + + [[nodiscard]] constexpr size_t offset_to_index(size_t offset) const noexcept { + assert(offset < static_cast(std::popcount(value_discriminant_))); + + size_type index = 0; + discriminant_type acc = value_discriminant_; + + while (true) { + size_t const ones = std::countr_one(acc); + if (ones > offset) { + break; + } + + acc >>= ones; + index += ones; + offset -= ones; + + size_t const skip = std::countr_zero(acc); + acc >>= skip; + index += skip; + } + + return index + offset; + } + + [[nodiscard]] constexpr size_type next_capacity() const noexcept { + return static_cast(capacity_ + capacity_growth_step); + } + + /** + * Insertion + * + * Two situations: + * - Either we are in a situation where + * std::is_nothrow_move_constructible::value is true. In this + * case, on insertion we just reallocate values_ when we reach its capacity + * (i.e. size_ == capacity_), otherwise we just put the new value at + * its appropriate place. We can easily keep the strong exception guarantee as + * moving the values around is safe. + * - Otherwise we are in a situation where + * std::is_nothrow_move_constructible::value is false. In this + * case on EACH insertion we allocate a new area of size_ + 1 where we + * copy the values of values_ into it and put the new value there. On + * success, we set values_ to this new area. Even if slower, it's the only + * way to preserve to strong exception guarantee. 
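// Editorial sketch (not part of the patch): the trait that selects between the
// two insertion paths described above. A type whose move constructor is not
// noexcept (but which is copy constructible) falls back to the slower
// copy-into-new-buffer path on every insertion to keep the strong guarantee.
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

struct throwing_move {
    std::vector<int> payload;
    throwing_move(const throwing_move &) = default;
    throwing_move(throwing_move &&other) : payload(std::move(other.payload)) {}  // not noexcept
};

static_assert(std::is_nothrow_move_constructible_v<std::string>);     // in-place shift path
static_assert(!std::is_nothrow_move_constructible_v<throwing_move>);  // reallocate-per-insert path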
+ */ + template requires (std::is_nothrow_move_constructible_v) + void insert_at_offset(allocator_type &alloc, size_type offset, Args &&...value_args) { + if (size_ < capacity_) { + insert_at_offset_no_realloc(alloc, offset, std::forward(value_args)...); + } else { + insert_at_offset_realloc(alloc, offset, next_capacity(), std::forward(value_args)...); + } + } + + template requires (!std::is_nothrow_move_constructible_v) + void insert_at_offset(allocator_type &alloc, size_type offset, Args &&...value_args) { + insert_at_offset_realloc(alloc, offset, size_ + 1, std::forward(value_args)...); + } + + template requires (std::is_nothrow_move_constructible_v) + void insert_at_offset_no_realloc(allocator_type &alloc, size_type offset, Args &&...value_args) { + assert(offset <= size_); + assert(size_ < capacity_); + + if constexpr (std::is_trivially_copyable_v) { + std::memmove(&values_[offset + 1], &values_[offset], (size_ - offset) * sizeof(value_type)); + } else { + for (size_type i = size_; i > offset; i--) { + construct_at(alloc, &values_[i], std::move(values_[i - 1])); + destroy_at(alloc, &values_[i - 1]); + } + } + + try { + construct_at(alloc, &values_[offset], std::forward(value_args)...); + } catch (...) { + // revert + if constexpr (std::is_trivially_copyable_v) { + std::memmove(&values_[offset], &values_[offset + 1], (size_ - offset) * sizeof(value_type)); + } else { + for (size_type i = offset; i < size_; i++) { + construct_at(alloc, &values_[i], std::move(values_[i + 1])); + destroy_at(alloc, &values_[i + 1]); + } + } + + throw; + } + } + + template requires (std::is_nothrow_move_constructible_v) + void insert_at_offset_realloc(allocator_type &alloc, size_type offset, + size_type new_capacity, Args &&...value_args) { + assert(new_capacity > size_); + + pointer new_values = alloc_traits::allocate(alloc, new_capacity); + assert(new_values != nullptr); // Allocate should throw if there is a failure + + try { + construct_at(alloc, &new_values[offset], std::forward(value_args)...); + } catch (...) { + alloc_traits::deallocate(alloc, new_values, new_capacity); + throw; + } + + if constexpr (std::is_trivially_copyable_v) { + if (values_ != nullptr) { + std::memcpy(&new_values[0], &values_[0], offset * sizeof(value_type)); + std::memcpy(&new_values[offset + 1], &values_[offset], (size_ - offset) * sizeof(value_type)); + } + } else { + // Cannot throw here as per requires clause + for (size_type i = 0; i < offset; i++) { + construct_at(alloc, &new_values[i], std::move(values_[i])); + } + + for (size_type i = offset; i < size_; i++) { + construct_at(alloc, &new_values[i + 1], std::move(values_[i])); + } + } + + destroy_and_deallocate_values(alloc, values_, size_, capacity_); + + values_ = new_values; + capacity_ = new_capacity; + } + + template requires (!std::is_nothrow_move_constructible_v) + void insert_at_offset_realloc(allocator_type &alloc, size_type offset, size_type new_capacity, Args &&...value_args) { + assert(new_capacity > size_); + + pointer new_values = alloc_traits::allocate(alloc, new_capacity); + assert(new_values != nullptr); // Allocate should throw if there is a failure + + size_type nb_new_values = 0; + try { + for (size_type i = 0; i < offset; i++) { + construct_at(alloc, &new_values[i], values_[i]); + nb_new_values++; + } + + construct_at(alloc, &new_values[offset], std::forward(value_args)...); + nb_new_values++; + + for (size_type i = offset; i < size_; i++) { + construct_at(alloc, &new_values[i + 1], values_[i]); + nb_new_values++; + } + } catch (...) 
{ + destroy_and_deallocate_values(alloc, new_values, nb_new_values, new_capacity); + throw; + } + + assert(nb_new_values == size_ + 1); + + destroy_and_deallocate_values(alloc, values_, size_, capacity_); + + values_ = new_values; + capacity_ = new_capacity; + } + + /** + * Erasure + * + * Two situations: + * - Either we are in a situation where + * std::is_nothrow_move_constructible::value is true. Simply + * destroy the value and left-shift move the value on the right of offset. + * - Otherwise we are in a situation where + * std::is_nothrow_move_constructible::value is false. Copy all + * the values except the one at offset into a new heap area. On success, we + * set values_ to this new area. Even if slower, it's the only way to + * preserve to strong exception guarantee. + */ + template requires (std::is_nothrow_move_constructible_v) + void erase_at_offset([[maybe_unused]] allocator_type &alloc, size_type offset) noexcept { + assert(offset < size_); + + destroy_at(alloc, &values_[offset]); + + if constexpr (std::is_trivially_copyable_v) { + std::memmove(&values_[offset], &values_[offset + 1], (size_ - offset - 1) * sizeof(value_type)); + } else { + for (size_type i = offset + 1; i < size_; ++i) { + construct_at(alloc, &values_[i - 1], std::move(values_[i])); + destroy_at(alloc, &values_[i]); + } + } + } + + template requires (!std::is_nothrow_move_constructible_v) + void erase_at_offset(allocator_type &alloc, size_type offset) { + assert(offset < size_); + + if (offset + 1 == size_) { + // Erasing the last element, don't need to reallocate. We keep the capacity. + destroy_at(alloc, &values_[offset]); + return; + } + + assert(size_ > 1); + auto const new_capacity = size_ - 1; + + pointer new_values = alloc_traits::allocate(alloc, new_capacity); + assert(new_values != nullptr); // Allocate should throw if there is a failure + + size_type nb_new_values = 0; + try { + for (size_type i = 0; i < offset; ++i) { + construct_at(alloc, &new_values[i], values_[i]); + nb_new_values++; + } + + for (size_type i = offset + 1; i < size_; ++i) { + construct_at(alloc, &new_values[i - 1], values_[i]); + nb_new_values++; + } + } catch (...) 
{ + destroy_and_deallocate_values(alloc, new_values, nb_new_values, new_capacity); + throw; + } + + assert(nb_new_values == size_ - 1); + + destroy_and_deallocate_values(alloc, values_, size_, capacity_); + + values_ = new_values; + capacity_ = new_capacity; + } + }; + +} // namespace dice::sparse_map::internal + +#endif//DICE_SPARSE_MAP_SPARSE_BUCKET_HPP diff --git a/include/dice/sparse_map/internal/sparse_bucket_array.hpp b/include/dice/sparse_map/internal/sparse_bucket_array.hpp new file mode 100644 index 0000000..c1233a0 --- /dev/null +++ b/include/dice/sparse_map/internal/sparse_bucket_array.hpp @@ -0,0 +1,238 @@ +#ifndef DICE_SPARSE_MAP_SPARSE_BUCKET_ARRAY_HPP +#define DICE_SPARSE_MAP_SPARSE_BUCKET_ARRAY_HPP + +#include "../sparse_props.hpp" +#include "sparse_bucket.hpp" + +namespace dice::sparse_map::internal { + + template + struct sparse_bucket_array { + private: + using element_alloc_traits = std::allocator_traits; + using bucket_alloc_traits = typename std::allocator_traits::template rebind_traits>; + + using bucket_allocator_type = typename bucket_alloc_traits::allocator_type; + using element_allocator_type = typename element_alloc_traits::allocator_type; + + public: + using bucket_type = typename bucket_alloc_traits::value_type; + using value_type = bucket_type; + using pointer = typename bucket_alloc_traits::pointer; + using const_pointer = typename bucket_alloc_traits::const_pointer; + using iterator = pointer; + using const_iterator = const_pointer; + using size_type = typename bucket_alloc_traits::size_type; + using difference_type = typename bucket_alloc_traits::difference_type; + using reference = bucket_type &; + using const_reference = bucket_type const &; + + private: + pointer buckets_ = nullptr; + size_type size_ = 0; + [[no_unique_address]] bucket_allocator_type bucket_alloc_; + [[no_unique_address]] element_allocator_type elem_alloc_; // this allocator lives here so that the allocator management code doesn't need to be written twice + + pointer make_new_buckets(size_type new_size) { + pointer new_buckets = bucket_alloc_traits::allocate(bucket_alloc_, new_size); + assert(new_buckets != nullptr); + + static_assert(std::is_nothrow_default_constructible_v); + for (size_type ix = 0; ix < new_size; ++ix) { + new (&new_buckets[ix]) bucket_type{}; + } + + return new_buckets; + } + + void resize_drop_old(size_type new_size) { + if (new_size <= size_) { + return; + } + + pointer new_buckets = make_new_buckets(new_size); + clear_deallocate(); + buckets_ = new_buckets; + size_ = new_size; + } + + void move_buckets_from(sparse_bucket_array &&other) { + resize_drop_old(other.size_); + + try { + for (size_type ix = 0; ix < other.size_; ++ix) { + new (&buckets_[ix]) bucket_type{std::move(other.buckets_[ix]), elem_alloc_}; + } + } catch (...) { + clear_deallocate(); + throw; + } + } + + void copy_buckets_from(sparse_bucket_array const &other) { + resize_drop_old(other.size_); + + try { + for (size_type ix = 0; ix < other.size_; ++ix) { + new (&buckets_[ix]) bucket_type{other.buckets_[ix], elem_alloc_}; + } + } catch (...) 
{ + clear_deallocate(); + throw; + } + } + + void clear_deallocate() noexcept { + clear_buckets(); + forget_deallocate(); + buckets_ = nullptr; + size_ = 0; + } + + public: + explicit constexpr sparse_bucket_array(size_type size, element_allocator_type const &alloc) : buckets_{nullptr}, + size_{0}, + bucket_alloc_{alloc}, + elem_alloc_{alloc} { + if (size > max_size()) [[unlikely]] { + throw std::length_error{"Maximum sparse_bucket_array length exceeded"}; + } + + if (size == 0) { + return; + } + + size = bucket_type::nb_sparse_buckets(size); + buckets_ = make_new_buckets(size); + size_ = size; + } + + sparse_bucket_array(sparse_bucket_array const &other) : bucket_alloc_{bucket_alloc_traits::select_on_container_copy_construction(other.bucket_alloc_)}, + elem_alloc_{element_alloc_traits::select_on_container_copy_construction(other.elem_alloc_)} { + copy_buckets_from(other); + } + + constexpr sparse_bucket_array(sparse_bucket_array &&other) noexcept : buckets_{std::exchange(other.buckets_, nullptr)}, + size_{std::exchange(other.size_, 0)}, + bucket_alloc_{std::move(other.bucket_alloc_)}, + elem_alloc_{std::move(other.elem_alloc_)} { + } + + sparse_bucket_array &operator=(sparse_bucket_array const &other) { + if (this == &other) { + return *this; + } + + // can potentially reuse our existing buffer: + // propagate + eq => reuse buffer + // propagate + neq => cannot reuse buffer + // npropagate + eq => reuse buffer + // npropagate + neq => cannot reuse buffer + + if (bucket_alloc_traits::is_always_equal::value || bucket_alloc_ == other.bucket_alloc_) { + // allocator before and after are equal + clear_buckets(); + copy_buckets_from(other); + return *this; + } + + clear_deallocate(); + if constexpr (bucket_alloc_traits::propagate_on_container_copy_assignment::value) { + bucket_alloc_ = other.bucket_alloc_; + elem_alloc_ = other.elem_alloc_; + } + + copy_buckets_from(other); + return *this; + } + + sparse_bucket_array &operator=(sparse_bucket_array &&other) noexcept { + assert(this != &other); + + // we can always steal the other array's buffer except when we are not supposed to + // propagate the allocator and they are not equal + + if constexpr (!bucket_alloc_traits::propagate_on_container_move_assignment::value && !bucket_alloc_traits::is_always_equal::value) { + if (bucket_alloc_ != other.bucket_alloc_) { + move_buckets_from(std::move(other)); + return *this; + } + } + + clear_deallocate(); + buckets_ = std::exchange(other.buckets_, nullptr); + size_ = std::exchange(other.size_, 0); + bucket_alloc_ = std::move(other.bucket_alloc_); + elem_alloc_ = std::move(other.elem_alloc_); + + return *this; + } + + ~sparse_bucket_array() noexcept { + if (buckets_ == nullptr) { + return; + } + + for (size_type ix = 0; ix < size_; ++ix) { + buckets_[ix].destroy_deallocate(elem_alloc_); + } + bucket_alloc_traits::deallocate(bucket_alloc_, buckets_, size_); + } + + void swap(sparse_bucket_array &other) noexcept { + using std::swap; + + static_assert(bucket_alloc_traits::propagate_on_container_swap::value, + "Not swapping allocators is not implemented"); + + swap(buckets_, other.buckets_); + swap(size_, other.size_); + swap(bucket_alloc_, other.bucket_alloc_); + swap(elem_alloc_, other.elem_alloc_); + } + + void forget_deallocate() { + bucket_alloc_traits::deallocate(bucket_alloc_, buckets_, size_); + buckets_ = nullptr; + size_ = 0; + } + + void clear_buckets() noexcept { + for (size_type ix = 0; ix < size_; ++ix) { + buckets_[ix].clear(elem_alloc_); + } + } + + [[nodiscard]] constexpr iterator begin() 
noexcept { return buckets_; } + [[nodiscard]] constexpr iterator end() noexcept { return buckets_ + size_; } + [[nodiscard]] constexpr const_iterator begin() const noexcept { return buckets_; } + [[nodiscard]] constexpr const_iterator end() const noexcept { return buckets_ + size_; } + [[nodiscard]] constexpr const_iterator cbegin() const noexcept { return buckets_; } + [[nodiscard]] constexpr const_iterator cend() const noexcept { return buckets_ + size_; } + + [[nodiscard]] constexpr bool empty() const noexcept { return size_ == 0; } + [[nodiscard]] constexpr size_type size() const noexcept { return size_; } + [[nodiscard]] constexpr size_type max_size() const noexcept { return bucket_alloc_traits::max_size(bucket_alloc_); }; + + [[nodiscard]] reference operator[](size_type const ix) noexcept { + assert(ix < size_); + return buckets_[ix]; + } + + [[nodiscard]] const_reference operator[](size_type const ix) const noexcept { + assert(ix < size_); + return buckets_[ix]; + } + + [[nodiscard]] element_allocator_type &element_allocator() noexcept { + return elem_alloc_; + } + + [[nodiscard]] element_allocator_type const &element_allocator() const noexcept { + return elem_alloc_; + } + }; + +} // namespace dice::sparse_map::internal + +#endif//DICE_SPARSE_MAP_SPARSE_BUCKET_ARRAY_HPP diff --git a/include/dice/sparse_map/internal/sparse_hash.hpp b/include/dice/sparse_map/internal/sparse_hash.hpp new file mode 100644 index 0000000..0afccfd --- /dev/null +++ b/include/dice/sparse_map/internal/sparse_hash.hpp @@ -0,0 +1,945 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef DICE_SPARSE_MAP_SPARSE_HASH_HPP +#define DICE_SPARSE_MAP_SPARSE_HASH_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sparse_bucket.hpp" +#include "sparse_bucket_array.hpp" +#include "../sparse_growth_policy.hpp" + +namespace dice::sparse_map::internal { + template + struct is_power_of_two_policy : std::false_type { + }; + + template + struct is_power_of_two_policy> : std::true_type { + }; + + /** + * Internal common class used by `sparse_map` and `sparse_set`. + * + * `ValueType` is what will be stored by `sparse_hash` (usually `std::pair` for map and `Key` for set). + * + * `k_select` should be a `FunctionObject` which takes a `ValueType` in + * parameter and returns a reference to the key. 
+ * + * `ValueSelect` should be a `FunctionObject` which takes a `ValueType` in + * parameter and returns a reference to the value. `ValueSelect` should be void + * if there is no value (in a set for example). + * + * The strong exception guarantee only holds if `ExceptionSafety` is set to + * `dice::sh::exception_safety::strong`. + * + * `ValueType` must be nothrow move constructible and/or copy constructible. + * Behaviour is undefined if the destructor of `ValueType` throws. + * + * + * The class holds its buckets in a 2-dimensional fashion. Instead of having a + * linear `std::vector` for [0, bucket_count) where each bucket stores + * one value, we have a `std::vector` (buckets_) + * where each `sparse_array_type` stores multiple values (up to + * `sparse_array_type::discriminant_bits`). To convert a one dimensional `ibucket` + * position to a position in `std::vector` and a position in + * `sparse_array_type`, use respectively the methods + * `sparse_array_type::sparse_ibucket(ibucket)` and + * `sparse_array_type::index_in_sparse_bucket(ibucket)`. + */ + template + struct sparse_hash { + private: + template + struct get_mapped_type { + using type = void; + using const_reference = void; + using reference = void; + }; + + template requires requires { typename VSel::value_type; } + struct get_mapped_type { + using type = typename VSel::value_type; + using const_reference = type const &; + using reference = type &; + }; + + public: + template + struct sparse_iterator; + + using key_type = typename KeyValueSelect::key_type; + using mapped_type = typename get_mapped_type::type; + using mapped_const_reference = typename get_mapped_type::const_reference; + using mapped_reference = typename get_mapped_type::reference; + using value_type = ValueType; + using hasher = Hash; + using key_equal = KeyEqual; + using allocator_type = Allocator; + using growth_policy = GrowthPolicy; + using reference = value_type &; + using const_reference = value_type const &; + using size_type = typename std::allocator_traits::size_type; + using pointer = typename std::allocator_traits::pointer; + using const_pointer = typename std::allocator_traits::const_pointer; + using difference_type = typename std::allocator_traits::difference_type; + using iterator = sparse_iterator; + using const_iterator = sparse_iterator; + + static constexpr size_type default_init_bucket_count = 0; + + static constexpr float max_load_factor = static_cast(MaxLoadFactor::num) / static_cast(MaxLoadFactor::den); + static_assert(max_load_factor >= 0.1f && max_load_factor <= 0.8f, + "Specified invalid MaxLoadFactor, must be in range [0.1, 0.8]"); + + private: + [[nodiscard]] static constexpr size_type calc_load_threshold_rehash(size_type bucket_count) noexcept { + return size_type(float(bucket_count) * max_load_factor); + } + + [[nodiscard]] static constexpr size_type calc_load_threshold_clear_deleted(size_type bucket_count) noexcept { + float const max_load_factor_with_deleted_buckets = max_load_factor + 0.5f * (1.0f - max_load_factor); + assert(max_load_factor_with_deleted_buckets > 0.0f && max_load_factor_with_deleted_buckets <= 1.0f); + + return size_type(float(bucket_count) * max_load_factor_with_deleted_buckets); + } + + + static constexpr bool has_mapped_type = !std::is_same_v; + + using sparse_bucket_array_type = sparse_bucket_array; + using sparse_bucket_type = typename sparse_bucket_array_type::bucket_type; + + private: + growth_policy gpol_; + + sparse_bucket_array_type buckets_; + size_type bucket_count_; + + size_type n_elements_; + 
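// Editorial worked example (not part of the patch): assuming MaxLoadFactor is
// e.g. std::ratio<1, 2> (max_load_factor == 0.5) and bucket_count == 64, the two
// threshold helpers above yield
//   calc_load_threshold_rehash(64)        == size_type(64 * 0.5)                 == 32
//   calc_load_threshold_clear_deleted(64) == size_type(64 * (0.5 + 0.5 * 0.5))   == 48
// i.e. the table grows once it would hold more than 32 elements, and buckets
// marked as deleted are purged once live + deleted entries exceed 48.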
size_type n_deleted_elements_; + + /** + * Maximum that size_ can reach before a rehash occurs automatically + * to grow the hash table. + */ + size_type load_threshold_rehash_; + + /** + * Maximum that size_ + n_deleted_elements_ can reach before cleaning + * up the buckets marked as deleted. + */ + size_type load_threshold_clear_deleted_; + + [[no_unique_address]] hasher h_; + [[no_unique_address]] key_equal keq_; + + public: + template + struct sparse_iterator { + private: + friend sparse_hash; + + using sparse_bucket_array_iterator = std::conditional_t; + + using sparse_bucket_iterator = std::conditional_t; + + private: + sparse_bucket_array_iterator cur_bucket_; + sparse_bucket_array_iterator end_bucket_; + sparse_bucket_iterator bucket_it_; + + private: + /** + * sparse_array_it should be nullptr if sparse_bucket_it == + * buckets_.end(). (TODO better way?) + */ + sparse_iterator(sparse_bucket_array_iterator bucket, + sparse_bucket_array_iterator end_bucket, + sparse_bucket_iterator bucket_it) noexcept : cur_bucket_{bucket}, + end_bucket_{end_bucket}, + bucket_it_{bucket_it} { + + assert((cur_bucket_ == end_bucket_ && bucket_it_ == nullptr) + || (cur_bucket_ != end_bucket_ && bucket_it_ != nullptr)); + } + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = const typename sparse_hash::value_type; + using difference_type = std::ptrdiff_t; + using reference = std::conditional_t; + using pointer = std::conditional_t::template rebind_traits::const_pointer, + typename std::allocator_traits::template rebind_traits::pointer>; + + // Copy constructor from iterator to const_iterator. + sparse_iterator(sparse_iterator const &other) noexcept requires (IsConst) : cur_bucket_{other.cur_bucket_}, + end_bucket_{other.end_bucket_}, + bucket_it_{other.bucket_it_} { + } + + sparse_iterator(sparse_iterator const &other) noexcept = default; + sparse_iterator(sparse_iterator &&other) noexcept = default; + sparse_iterator &operator=(sparse_iterator const &other) noexcept = default; + sparse_iterator &operator=(sparse_iterator &&other) noexcept = default; + + reference operator*() const noexcept { return KeyValueSelect::both(*bucket_it_); } + pointer operator->() const noexcept { return &KeyValueSelect::both(*bucket_it_); } + + sparse_iterator &operator++() noexcept { + assert(bucket_it_ != nullptr); + ++bucket_it_; + + if (bucket_it_ != (*cur_bucket_).end()) { + return *this; + } + + do { + if (++cur_bucket_ == end_bucket_) { + bucket_it_ = nullptr; + return *this; + } + } while ((*cur_bucket_).empty()); + + bucket_it_ = (*cur_bucket_).begin(); + return *this; + } + + sparse_iterator operator++(int) noexcept { + auto tmp = *this; + ++*this; + return tmp; + } + + template + bool operator==(sparse_iterator const &other) const noexcept { + return cur_bucket_ == other.cur_bucket_ && bucket_it_ == other.bucket_it_; + } + + template + bool operator!=(sparse_iterator const &other) const noexcept { + return cur_bucket_ != other.cur_bucket_ || bucket_it_ != other.bucket_it_; + } + + template + bool operator<(sparse_iterator const &other) const noexcept { + return cur_bucket_ < other.cur_bucket_ || (cur_bucket_ == other.cur_bucket_ && bucket_it_ < other.bucket_it_); + } + }; + + iterator mutable_iterator(const_iterator pos) noexcept { + // SAFETY: this is non-const therefore the underlying buckets are also non-const + // as evidenced by the fact that we can call begin on them + auto it_sparse_buckets = buckets_.begin() + std::distance(buckets_.cbegin(), pos.cur_bucket_); + + // 
SAFETY: this is non-const therefore the underlying sparse array is also non-const + auto it_array = sparse_bucket_type::unsafe_mutable_iterator(pos.bucket_it_); + + return iterator{it_sparse_buckets, buckets_.end(), it_array}; + } + + public: + sparse_hash(size_type bucket_count, + hasher const &hash, + key_equal const &equal, + allocator_type const &alloc) : gpol_{bucket_count}, + buckets_{bucket_count, alloc}, + bucket_count_{bucket_count}, + n_elements_{0}, + n_deleted_elements_{0}, + load_threshold_rehash_{calc_load_threshold_rehash(bucket_count)}, + load_threshold_clear_deleted_{calc_load_threshold_clear_deleted(bucket_count)}, + h_{hash}, + keq_{equal} { + + // Check in the constructor instead of outside of a function to avoid + // compilation issues when value_type is not complete. + static_assert(std::is_nothrow_move_constructible::value || + std::is_copy_constructible::value, + "Key, and T if present, must be nothrow move constructible " + "and/or copy constructible."); + } + + sparse_hash(sparse_hash const &other) = default; + sparse_hash &operator=(sparse_hash const &other) = default; + + sparse_hash(sparse_hash &&other) noexcept : gpol_{std::move(other.gpol_)}, + buckets_{std::move(other.buckets_)}, + bucket_count_{std::exchange(other.bucket_count_, 0)}, + n_elements_{std::exchange(other.n_elements_, 0)}, + n_deleted_elements_{std::exchange(other.n_deleted_elements_, 0)}, + load_threshold_rehash_{std::exchange(other.load_threshold_rehash_, 0)}, + load_threshold_clear_deleted_{std::exchange(other.load_threshold_clear_deleted_, 0)}, + h_{std::move(other.h_)}, + keq_{std::move(other.keq_)} { + other.gpol_.clear(); + } + + sparse_hash &operator=(sparse_hash &&other) noexcept { + assert(this != &other); + + gpol_ = std::move(other.gpol_); + other.gpol_.clear(); + + buckets_ = std::move(other.buckets_); + bucket_count_ = std::exchange(other.bucket_count_, 0); + n_elements_ = std::exchange(other.n_elements_, 0); + n_deleted_elements_ = std::exchange(other.n_deleted_elements_, 0); + load_threshold_rehash_ = std::exchange(other.load_threshold_rehash_, 0); + load_threshold_clear_deleted_ = std::exchange(other.load_threshold_clear_deleted_, 0); + + h_ = std::move(other.h_); + keq_ = std::move(other.keq_); + + return *this; + } + + ~sparse_hash() = default; + + allocator_type get_allocator() const { + return buckets_.element_allocator(); + } + + [[nodiscard]] iterator begin() noexcept { + auto begin = buckets_.begin(); + while (begin != buckets_.end() && begin->empty()) { + ++begin; + } + + return iterator{begin, + buckets_.end(), + begin != buckets_.end() ? begin->begin() : nullptr}; + } + + [[nodiscard]] const_iterator begin() const noexcept { + return cbegin(); + } + + [[nodiscard]] const_iterator cbegin() const noexcept { + auto begin = buckets_.begin(); + while (begin != buckets_.end() && begin->empty()) { + ++begin; + } + + return const_iterator{begin, + buckets_.end(), + begin != buckets_.end() ? 
begin->begin() : nullptr}; + } + + [[nodiscard]] iterator end() noexcept { + return iterator{buckets_.end(), + buckets_.end(), + nullptr}; + } + + [[nodiscard]] const_iterator end() const noexcept { + return cend(); + } + + [[nodiscard]] const_iterator cend() const noexcept { + return const_iterator{buckets_.end(), + buckets_.end(), + nullptr}; + } + + [[nodiscard]] bool empty() const noexcept { return n_elements_ == 0; } + + [[nodiscard]] size_type size() const noexcept { return n_elements_; } + + [[nodiscard]] size_type max_size() const noexcept { + return std::min(std::allocator_traits::max_size(), + buckets_.max_size()); + } + + void clear() noexcept { + buckets_.clear_buckets(); + n_elements_ = 0; + n_deleted_elements_ = 0; + } + + template + std::pair insert(P &&value) { + return insert_impl(KeyValueSelect::key(value), std::forward
<P>
(value)); + } + + template + iterator insert_hint(const_iterator hint, P &&value) { + if (hint != cend() && keq_(KeyValueSelect::key(*hint), KeyValueSelect::key(value))) { + return mutable_iterator(hint); + } + + return insert(std::forward
<P>
(value)).first; + } + + template + void insert(InputIt first, InputIt last) { + if constexpr (std::is_base_of_v::iterator_category>) { + const auto nb_elements_insert = std::distance(first, last); + const size_type nb_free_buckets = load_threshold_rehash_ - size(); + assert(load_threshold_rehash_ >= size()); + + if (nb_elements_insert > 0 && + nb_free_buckets < size_type(nb_elements_insert)) { + reserve(size() + size_type(nb_elements_insert)); + } + } + + for (; first != last; ++first) { + insert(*first); + } + } + + template + std::pair insert_or_assign(K &&key, M &&obj) { + auto it = try_emplace(std::forward(key), std::forward(obj)); + if (!it.second) { + it.first->second = std::forward(obj); + } + + return it; + } + + template + iterator insert_or_assign(const_iterator hint, K &&key, M &&obj) { + if (hint != cend() && keq_(KeyValueSelect::key(*hint), key)) { + auto it = mutable_iterator(hint); + it->second = std::forward(obj); + + return it; + } + + return insert_or_assign(std::forward(key), std::forward(obj)).first; + } + + template + std::pair emplace(Args &&...args) { + return insert(value_type(std::forward(args)...)); + } + + template + iterator emplace_hint(const_iterator hint, Args &&...args) { + return insert_hint(hint, value_type(std::forward(args)...)); + } + + template + std::pair try_emplace(K &&key, Args &&...args) { + return insert_impl(key, std::piecewise_construct, + std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(args)...)); + } + + template + iterator try_emplace_hint(const_iterator hint, K &&key, Args &&...args) { + if (hint != cend() && keq_(KeyValueSelect::key(*hint), key)) { + return mutable_iterator(hint); + } + + return try_emplace(std::forward(key), std::forward(args)...).first; + } + + /** + * Here to avoid `template size_type erase(const K& key)` being used + * when we use an iterator instead of a const_iterator. 
+ */ + iterator erase(iterator pos) { + assert(pos != end() && n_elements_ > 0); + //vector iterator with fancy pointers have a problem with -> + auto next_bucket_it = pos.cur_bucket_->erase(buckets_.element_allocator(), pos.bucket_it_); + n_elements_--; + n_deleted_elements_++; + + if (next_bucket_it != pos.cur_bucket_->end()) { + return iterator{pos.cur_bucket_, + buckets_.end(), + next_bucket_it}; + } + + auto it_sparse_buckets_next = pos.cur_bucket_; + do { + ++it_sparse_buckets_next; + } while (it_sparse_buckets_next != buckets_.end() + && (*it_sparse_buckets_next).empty()); + + if (it_sparse_buckets_next == buckets_.end()) { + return end(); + } else { + return iterator{it_sparse_buckets_next, + buckets_.end(), + it_sparse_buckets_next->begin()}; + } + } + + iterator erase(const_iterator pos) { + return erase(mutable_iterator(pos)); + } + + [[deprecated("This function is originally from tsl::sparse_hash but it doesn't really make much sense to use, because\n" + "the items between two iterators are effectively random (they are not sorted or anything) so you don't know what you were deleting " + "(except if you manually checked all of them, in which case you could have just deleted them then)\n" + "So this function is effectively just: 'please erase distance(first, last) number of random elements'")]] + iterator erase(const_iterator first, const_iterator last) { + auto const nb_elements_to_erase = static_cast(std::distance(first, last)); + auto to_delete = mutable_iterator(first); + for (size_type i = 0; i < nb_elements_to_erase; ++i) { + to_delete = erase(to_delete); + } + + return to_delete; + } + + template + size_type erase(K const &key) { + return erase(key, h_(key)); + } + + template + size_type erase(K const &key, std::size_t hash) { + return erase_impl(key, hash); + } + + void swap(sparse_hash &other) noexcept { + using std::swap; + + swap(buckets_, other.buckets_); + swap(bucket_count_, other.bucket_count_); + swap(n_elements_, other.n_elements_); + swap(n_deleted_elements_, other.n_deleted_elements_); + swap(load_threshold_rehash_, other.load_threshold_rehash_); + swap(load_threshold_clear_deleted_, other.load_threshold_clear_deleted_); + swap(h_, other.h_); + swap(keq_, other.keq_); + swap(gpol_, other.gpol_); + } + + template requires (has_mapped_type) + mapped_reference at(K const &key) { + return at_impl(*this, key, h_(key)); + } + + template requires (has_mapped_type) + mapped_reference at(K const &key, std::size_t hash) { + return at_impl(*this, key, hash); + } + + template requires (has_mapped_type) + mapped_const_reference at(K const &key) const { + return at_impl(*this, key, h_(key)); + } + + template requires (has_mapped_type) + mapped_const_reference at(K const &key, std::size_t hash) const { + return at_impl(*this, key, hash); + } + + template requires (has_mapped_type) + mapped_reference operator[](K &&key) { + return try_emplace(std::forward(key)).first->second; + } + + template + [[nodiscard]] bool contains(K const &key) const noexcept { + return find(key, h_(key)) != cend(); + } + + template + [[nodiscard]] bool contains(K const &key, std::size_t hash) const noexcept { + return find(key, hash) != cend(); + } + + template + [[nodiscard]] size_type count(K const &key) const noexcept { + return count(key, h_(key)); + } + + template + [[nodiscard]] size_type count(K const &key, std::size_t hash) const noexcept { + return static_cast(find(key, hash) != cend()); + } + + template + [[nodiscard]] iterator find(K const &key) noexcept { + return find_impl(*this, key, 
h_(key)); + } + + template + [[nodiscard]] iterator find(K const &key, std::size_t hash) noexcept { + return find_impl(*this, key, hash); + } + + template + [[nodiscard]] const_iterator find(K const &key) const noexcept { + return find_impl(*this, key, h_(key)); + } + + template + [[nodiscard]] const_iterator find(K const &key, std::size_t hash) const noexcept { + return find_impl(*this, key, hash); + } + + template + std::pair equal_range(K const &key) noexcept { + return equal_range(key, h_(key)); + } + + template + std::pair equal_range(K const &key, std::size_t hash) noexcept { + iterator it = find(key, hash); + return std::make_pair(it, it == end() ? it : std::next(it)); + } + + template + std::pair equal_range(K const &key) const noexcept { + return equal_range(key, h_(key)); + } + + template + std::pair equal_range(K const &key, std::size_t hash) const noexcept { + const_iterator it = find(key, hash); + return std::make_pair(it, (it == cend()) ? it : std::next(it)); + } + + [[nodiscard]] size_type bucket_count() const noexcept { + return bucket_count_; + } + + [[nodiscard]] size_type max_bucket_count() const noexcept { + return buckets_.max_size(); + } + + [[nodiscard]] float load_factor() const noexcept { + if (bucket_count_ == 0) { + return 0; + } + + return static_cast(n_elements_) / static_cast(bucket_count_); + } + + void rehash(size_type count) { + count = std::max(count, size_type(std::ceil(float(size()) / max_load_factor))); + rehash_impl(count); + } + + void reserve(size_type count) { + rehash(size_type(std::ceil(float(count) / max_load_factor))); + } + + [[nodiscard]] hasher hash_function() const { return h_; } + [[nodiscard]] key_equal key_eq() const { return keq_; } + + private: + [[nodiscard]] size_type bucket_for_hash(std::size_t hash) const noexcept { + auto const bucket = gpol_.bucket_for_hash(hash); + assert(sparse_bucket_type::sparse_ibucket(bucket) < buckets_.size() + || (bucket == 0 && buckets_.empty())); + + return bucket; + } + + [[nodiscard]] size_type next_bucket(size_type ibucket, [[maybe_unused]] size_type iprobe) const noexcept requires (is_power_of_two_policy::value) { + if constexpr (Probing == probing::linear) { + return (ibucket + 1) & gpol_.mask(); + } else { + assert(Probing == probing::quadratic); + return (ibucket + iprobe) & gpol_.mask(); + } + } + + [[nodiscard]] size_type next_bucket(size_type ibucket, [[maybe_unused]] size_type iprobe) const noexcept requires (!is_power_of_two_policy::value) { + if constexpr (Probing == probing::linear) { + ibucket++; + return ibucket != bucket_count_ ? ibucket : 0; + } else { + assert(Probing == probing::quadratic); + ibucket += iprobe; + return ibucket < bucket_count_ ? ibucket : ibucket % bucket_count_; + } + } + + template + std::pair insert_impl(K const &key, Args &&...value_type_args) { + if (buckets_.empty()) [[unlikely]] { + rehash_impl(gpol_.next_bucket_count()); + } + + /** + * We must insert the value in the first empty or deleted bucket we find. If + * we first find a deleted bucket, we still have to continue the search + * until we find an empty bucket or until we have searched all the buckets + * to be sure that the value is not in the hash table. We thus remember the + * position, if any, of the first deleted bucket we have encountered so we + * can insert it there if needed. 
+ */ + bool found_first_deleted_bucket = false; + std::size_t sparse_ibucket_first_deleted = 0; + typename sparse_bucket_type::size_type index_in_sparse_bucket_first_deleted = 0; + + auto const hash = h_(key); + auto ibucket = bucket_for_hash(hash); + + std::size_t probe = 0; + while (true) { + auto const sparse_ibucket = sparse_bucket_type::sparse_ibucket(ibucket); + auto const index_in_sparse_bucket = sparse_bucket_type::index_in_sparse_bucket(ibucket); + + if (buckets_[sparse_ibucket].has_value(index_in_sparse_bucket)) { + auto value_it = buckets_[sparse_ibucket].value(index_in_sparse_bucket); + if (keq_(key, KeyValueSelect::key(*value_it))) { + return std::make_pair(iterator{std::next(buckets_.begin(), sparse_ibucket), + buckets_.end(), + value_it}, + false); + } + } else if (buckets_[sparse_ibucket].has_deleted_value(index_in_sparse_bucket) && probe < bucket_count_) { + if (!found_first_deleted_bucket) { + found_first_deleted_bucket = true; + sparse_ibucket_first_deleted = sparse_ibucket; + index_in_sparse_bucket_first_deleted = index_in_sparse_bucket; + } + } else { + /** + * At this point we are sure that the value does not exist + * in the hash table. + * First check if we satisfy load and delete thresholds, and if not, + * rehash the hash table (and therefore start over). Otherwise, just + * insert the value into the appropriate bucket. + */ + if (size() >= load_threshold_rehash_) { + rehash_impl(gpol_.next_bucket_count()); + return insert_impl(key, std::forward(value_type_args)...); + } + + if (size() + n_deleted_elements_ >= load_threshold_clear_deleted_) { + clear_deleted_buckets(); + return insert_impl(key, std::forward(value_type_args)...); + } + + if (found_first_deleted_bucket) { + auto it = insert_in_bucket(sparse_ibucket_first_deleted, + index_in_sparse_bucket_first_deleted, + std::forward(value_type_args)...); + n_deleted_elements_ -= 1; + return it; + } + + return insert_in_bucket(sparse_ibucket, index_in_sparse_bucket, + std::forward(value_type_args)...); + } + + probe++; + ibucket = next_bucket(ibucket, probe); + } + } + + template + std::pair insert_in_bucket(std::size_t sparse_ibucket, + typename sparse_bucket_type::size_type index_in_sparse_bucket, + Args &&...value_type_args) { + // is not called when empty + auto value_it = buckets_[sparse_ibucket].set(buckets_.element_allocator(), + index_in_sparse_bucket, + std::forward(value_type_args)...); + n_elements_++; + + return std::make_pair(iterator{std::next(buckets_.begin(), sparse_ibucket), + buckets_.end(), + value_it}, + true); + } + + template + size_type erase_impl(K const &key, std::size_t hash) { + if (buckets_.empty()) { + return 0; + } + + std::size_t ibucket = bucket_for_hash(hash); + std::size_t probe = 0; + + while (true) { + auto const sparse_ibucket = sparse_bucket_type::sparse_ibucket(ibucket); + auto const index_in_sparse_bucket = sparse_bucket_type::index_in_sparse_bucket(ibucket); + + auto &bucket = buckets_[sparse_ibucket]; + + if (bucket.has_value(index_in_sparse_bucket)) { + auto value_it = bucket.value(index_in_sparse_bucket); + + if (keq_(key, KeyValueSelect::key(*value_it))) { + bucket.erase(buckets_.element_allocator(), value_it, index_in_sparse_bucket); + n_elements_ -= 1; + n_deleted_elements_ += 1; + + return 1; + } + } else if (!bucket.has_deleted_value(index_in_sparse_bucket) || probe >= bucket_count_) { + return 0; + } + + probe++; + ibucket = next_bucket(ibucket, probe); + } + } + + template + [[nodiscard]] static auto find_impl(Self &&self, K const &key, std::size_t hash) noexcept { 
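+            // Shared lookup for the const and non-const find() overloads: probe from
+            // bucket_for_hash(hash). A slot holding a non-matching value or a deleted marker
+            // keeps the search going; a never-used slot, or exhausting bucket_count_ probes,
+            // means the key is not present.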
+ if (self.buckets_.empty()) { + return self.end(); + } + + std::size_t ibucket = self.bucket_for_hash(hash); + std::size_t probe = 0; + + while (true) { + auto const sparse_ibucket = sparse_bucket_type::sparse_ibucket(ibucket); + auto const index_in_sparse_bucket = sparse_bucket_type::index_in_sparse_bucket(ibucket); + + auto &bucket = self.buckets_[sparse_ibucket]; + + if (bucket.has_value(index_in_sparse_bucket)) { + auto value_it = bucket.value(index_in_sparse_bucket); + if (self.keq_(key, KeyValueSelect::key(*value_it))) { + static constexpr bool is_const = std::is_const_v>; + + return sparse_iterator{std::next(self.buckets_.begin(), sparse_ibucket), + self.buckets_.end(), + value_it}; + } + } else if (!bucket.has_deleted_value(index_in_sparse_bucket) || probe >= self.bucket_count_) { + return self.end(); + } + + probe++; + ibucket = self.next_bucket(ibucket, probe); + } + } + + template + static decltype(auto) at_impl(Self &&self, K const &key, std::size_t hash) { + if (auto it = find_impl(self, key, hash); it != self.end()) { + return KeyValueSelect::value(*it); + } + + throw std::out_of_range{"Couldn't find key."}; + } + + void clear_deleted_buckets() { + // TODO could be optimized, we could do it in-place instead of allocating a + // new bucket array. + rehash_impl(bucket_count_); + assert(n_deleted_elements_ == 0); + } + + void rehash_impl(size_type count) requires (ExceptionSafety == exception_safety::basic) { + sparse_hash new_table(count, h_, keq_, buckets_.element_allocator()); + + for (auto &bucket : buckets_) { + for (auto &val : bucket) { + new_table.insert_on_rehash(std::move(val)); + } + + bucket.destroy_deallocate(buckets_.element_allocator()); + } + + buckets_.forget_deallocate(); + new_table.swap(*this); + } + + /** + * TODO: For now we copy each element into the new map. We could move + * them if they are nothrow_move_constructible without triggering + * any exception if we reserve enough space in the sparse arrays beforehand. 
+ */ + void rehash_impl(size_type count) requires (ExceptionSafety == exception_safety::strong) { + sparse_hash new_table(count, h_, keq_, buckets_.element_allocator()); + + for (auto const &bucket : buckets_) { + for (auto const &val : bucket) { + new_table.insert_on_rehash(val); + } + } + + new_table.swap(*this); + } + + template + void insert_on_rehash(K &&key_value) { + key_type const &key = KeyValueSelect::key(key_value); + + std::size_t const hash = h_(key); + std::size_t ibucket = bucket_for_hash(hash); + std::size_t probe = 0; + + while (true) { + auto const sparse_ibucket = sparse_bucket_type::sparse_ibucket(ibucket); + auto const index_in_sparse_bucket = sparse_bucket_type::index_in_sparse_bucket(ibucket); + + auto &bucket = buckets_[sparse_ibucket]; + + if (!bucket.has_value(index_in_sparse_bucket)) { + bucket.set(buckets_.element_allocator(), index_in_sparse_bucket, std::forward(key_value)); + n_elements_++; + + return; + } else { + assert(!keq_(key, KeyValueSelect::key(*bucket.value(index_in_sparse_bucket)))); + } + + probe++; + ibucket = next_bucket(ibucket, probe); + } + } + }; + +} // namespace dice::sparse_map::internal + +#endif//DICE_SPARSE_MAP_SPARSE_HASH_HPP diff --git a/include/dice/sparse_map/sparse_growth_policy.hpp b/include/dice/sparse_map/sparse_growth_policy.hpp new file mode 100644 index 0000000..ba296c9 --- /dev/null +++ b/include/dice/sparse_map/sparse_growth_policy.hpp @@ -0,0 +1,265 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef DICE_SPARSE_MAP_SPARSE_GROWTH_POLICY_HPP +#define DICE_SPARSE_MAP_SPARSE_GROWTH_POLICY_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace dice::sparse_map { + + template + concept growth_policy = requires (G const cgpol, G gpol, std::size_t &min_bucket_count_in_out, std::size_t hash) { + G{min_bucket_count_in_out}; + { cgpol.bucket_for_hash(hash) } -> std::convertible_to; + { cgpol.next_bucket_count() } -> std::convertible_to; + { G::max_bucket_count() } -> std::convertible_to; + gpol.clear(); + + noexcept(cgpol.bucket_for_hash(hash)); + noexcept(G::max_bucket_count()); + noexcept(gpol.clear()); + }; + + /** + * Grow the hash table by a factor of GrowthFactor keeping the bucket count to a + * power of two. It allows the table to use a mask operation instead of a modulo + * operation to map a hash to a bucket. 
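+ * For example, with `GrowthFactor == 2` a table whose bucket count is 8 grows to 16 on the
+ * next rehash, and `bucket_for_hash()` reduces a hash with `hash & (bucket_count - 1)`
+ * instead of the slower `hash % bucket_count`.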
+ * + * GrowthFactor must be a power of two >= 2. + */ + template requires (std::has_single_bit(GrowthFactor) && GrowthFactor >= 2) + struct power_of_two_growth_policy { + protected: + std::size_t m_mask; + + public: + /** + * Called on the hash table creation and on rehash. The number of buckets for + * the table is passed in parameter. This number is a minimum, the policy may + * update this value with a higher value if needed (but not lower). + * + * If 0 is given, min_bucket_count_in_out must still be 0 after the policy + * creation and bucket_for_hash must always return 0 in this case. + */ + explicit constexpr power_of_two_growth_policy(std::size_t &min_bucket_count_in_out) { + if (min_bucket_count_in_out > max_bucket_count()) [[unlikely]] { + throw std::length_error{"The hash table exceeds its maximum size."}; + } + + if (min_bucket_count_in_out > 0) { + min_bucket_count_in_out = std::bit_ceil(min_bucket_count_in_out); + m_mask = min_bucket_count_in_out - 1; + } else { + m_mask = 0; + } + } + + /** + * Return the bucket [0, bucket_count()) to which the hash belongs. + * If bucket_count() is 0, it must always return 0. + */ + [[nodiscard]] constexpr std::size_t bucket_for_hash(std::size_t hash) const noexcept { + return hash & m_mask; + } + + /** + * Return the number of buckets that should be used on next growth. + */ + [[nodiscard]] constexpr std::size_t next_bucket_count() const { + if ((m_mask + 1) > max_bucket_count() / GrowthFactor) [[unlikely]] { + throw std::length_error("The hash table exceeds its maximum size."); + } + + return (m_mask + 1) * GrowthFactor; + } + + /** + * Return the maximum number of buckets supported by the policy. + */ + [[nodiscard]] static constexpr std::size_t max_bucket_count() noexcept { + // Largest power of two. + return (std::numeric_limits::max() / 2) + 1; + } + + /** + * Reset the growth policy as if it was created with a bucket count of 0. + * After a clear, the policy must always return 0 when bucket_for_hash is + * called. + */ + constexpr void clear() noexcept { + m_mask = 0; + } + + [[nodiscard]] constexpr std::size_t mask() const noexcept { + return m_mask; + } + }; + + /** + * Grow the hash table by GrowthFactor::num / GrowthFactor::den and use a modulo + * to map a hash to a bucket. Slower but it can be useful if you want a slower + * growth. 
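+ *
+ * For example, with `GrowthFactor = std::ratio<3, 2>` a table with 1000 buckets asks for
+ * 1500 buckets on the next rehash.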
+ */ + template> + struct mod_growth_policy { + protected: + static constexpr double REHASH_SIZE_MULTIPLICATION_FACTOR = 1.0 * GrowthFactor::num / GrowthFactor::den; + static_assert(REHASH_SIZE_MULTIPLICATION_FACTOR >= 1.1, "Growth factor should be >= 1.1."); + + static constexpr std::size_t MAX_BUCKET_COUNT = static_cast(static_cast(std::numeric_limits::max()) / REHASH_SIZE_MULTIPLICATION_FACTOR); + + std::size_t m_mod; + + public: + explicit constexpr mod_growth_policy(std::size_t &min_bucket_count_in_out) { + if (min_bucket_count_in_out > max_bucket_count()) [[unlikely]] { + throw std::length_error{"The hash table exceeds its maximum size."}; + } + + if (min_bucket_count_in_out > 0) { + m_mod = min_bucket_count_in_out; + } else { + m_mod = 1; + } + } + + [[nodiscard]] constexpr std::size_t bucket_for_hash(std::size_t hash) const noexcept { + return hash % m_mod; + } + + [[nodiscard]] constexpr std::size_t next_bucket_count() const { + if (m_mod == max_bucket_count()) [[unlikely]] { + throw std::length_error{"The hash table exceeds its maximum size."}; + } + + auto const next_bucket_count = std::ceil(static_cast(m_mod) * REHASH_SIZE_MULTIPLICATION_FACTOR); + if (!std::isnormal(next_bucket_count)) [[unlikely]] { + throw std::length_error{"The hash table exceeds its maximum size."}; + } + + if (next_bucket_count > static_cast(max_bucket_count())) { + return max_bucket_count(); + } + + return static_cast(next_bucket_count); + } + + [[nodiscard]] static constexpr std::size_t max_bucket_count() noexcept { + return MAX_BUCKET_COUNT; + } + + void clear() noexcept { + m_mod = 1; + } + }; + + /** + * Grow the hash table by using prime numbers as bucket count. Slower than + * dice::sh::power_of_two_growth_policy in general but will probably distribute + * the values around better in the buckets with a poor hash function. + * + * To allow the compiler to optimize the modulo operation, a lookup table is + * used with constant primes numbers. + * + * With a switch the code would look like: + * \code + * switch(iprime) { // iprime is the current prime of the hash table + * case 0: hash % 5ul; + * break; + * case 1: hash % 17ul; + * break; + * case 2: hash % 29ul; + * break; + * ... + * } + * \endcode + * + * Due to the constant variable in the modulo the compiler is able to optimize + * the operation by a series of multiplications, substractions and shifts. + * + * The 'hash % 5' could become something like 'hash - (hash * 0xCCCCCCCD) >> 34) + * * 5' in a 64 bits environment. 
+ */ + struct prime_growth_policy { + protected: + static constexpr std::array PRIMES{1ul, 5ul, 17ul, 29ul, 37ul, + 53ul, 67ul, 79ul, 97ul, 131ul, + 193ul, 257ul, 389ul, 521ul, 769ul, + 1031ul, 1543ul, 2053ul, 3079ul, 6151ul, + 12289ul, 24593ul, 49157ul, 98317ul, 196613ul, + 393241ul, 786433ul, 1572869ul, 3145739ul, 6291469ul, + 12582917ul, 25165843ul, 50331653ul, 100663319ul, 201326611ul, + 402653189ul, 805306457ul, 1610612741ul, 3221225473ul, 4294967291ul}; + + std::uint8_t m_iprime; + + public: + explicit constexpr prime_growth_policy(std::size_t &min_bucket_count_in_out) { + auto it_prime = std::lower_bound(PRIMES.begin(), PRIMES.end(), min_bucket_count_in_out); + if (it_prime == PRIMES.end()) [[unlikely]] { + throw std::length_error{"The hash table exceeds its maximum size."}; + } + + m_iprime = static_cast(std::distance(PRIMES.begin(), it_prime)); + + if (min_bucket_count_in_out > 0) { + min_bucket_count_in_out = *it_prime; + } else { + min_bucket_count_in_out = 0; + } + } + + [[nodiscard]] constexpr std::size_t bucket_for_hash(std::size_t hash) const noexcept { + return hash % PRIMES[m_iprime]; + } + + [[nodiscard]] constexpr std::size_t next_bucket_count() const { + if (static_cast(m_iprime) + 1 >= PRIMES.size()) { + throw std::length_error("The hash table exceeds its maximum size."); + } + + return PRIMES[m_iprime + 1]; + } + + [[nodiscard]] static constexpr std::size_t max_bucket_count() noexcept { + return PRIMES.back(); + } + + constexpr void clear() noexcept { + m_iprime = 0; + } + }; + +}// namespace dice::sparse_map + +#endif diff --git a/include/dice/sparse_map/sparse_map.hpp b/include/dice/sparse_map/sparse_map.hpp new file mode 100644 index 0000000..f314993 --- /dev/null +++ b/include/dice/sparse_map/sparse_map.hpp @@ -0,0 +1,698 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef DICE_SPARSE_MAP_SPARSE_MAP_HPP +#define DICE_SPARSE_MAP_SPARSE_MAP_HPP + +#include +#include +#include +#include +#include +#include + +#include "internal/sparse_hash.hpp" + +namespace dice::sparse_map { + + /** + * Implementation of a sparse hash map using open-addressing with quadratic + * probing. The goal on the hash map is to be the most memory efficient + * possible, even at low load factor, while keeping reasonable performances. + * + * `GrowthPolicy` defines how the map grows and consequently how a hash value is + * mapped to a bucket. 
By default the map uses + * `dice::sh::power_of_two_growth_policy`. This policy keeps the number of + * buckets to a power of two and uses a mask to map the hash to a bucket instead + * of the slow modulo. Other growth policies are available and you may define + * your own growth policy, check `dice::sh::power_of_two_growth_policy` for the + * interface. + * + * `ExceptionSafety` defines the exception guarantee provided by the class. By + * default only the basic exception safety is guaranteed which mean that all + * resources used by the hash map will be freed (no memory leaks) but the hash + * map may end-up in an undefined state if an exception is thrown (undefined + * here means that some elements may be missing). This can ONLY happen on rehash + * (either on insert or if `rehash` is called explicitly) and will occur if the + * Allocator can't allocate memory (`std::bad_alloc`) or if the copy constructor + * (when a nothrow move constructor is not available) throws an exception. This + * can be avoided by calling `reserve` beforehand. This basic guarantee is + * similar to the one of `google::sparse_hash_map` and `spp::sparse_hash_map`. + * It is possible to ask for the strong exception guarantee with + * `dice::sh::exception_safety::strong`, the drawback is that the map will be + * slower on rehashes and will also need more memory on rehashes. + * + * `Sparsity` defines how much the hash set will compromise between insertion + * speed and memory usage. A high sparsity means less memory usage but longer + * insertion times, and vice-versa for low sparsity. The default + * `dice::sh::sparsity::medium` sparsity offers a good compromise. It doesn't + * change the lookup speed. + * + * `Key` and `T` must be nothrow move constructible and/or copy constructible. + * + * If the destructor of `Key` or `T` throws an exception, the behaviour of the + * class is undefined. + * + * Iterators invalidation: + * - clear, operator=, reserve, rehash: always invalidate the iterators. + * - insert, emplace, emplace_hint, operator[]: if there is an effective + * insert, invalidate the iterators. + * - erase: always invalidate the iterators. 
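+ *
+ * A minimal usage sketch (illustrative only; it assumes the header is on the include path):
+ * \code
+ * #include <dice/sparse_map/sparse_map.hpp>
+ * #include <iostream>
+ * #include <string>
+ *
+ * int main() {
+ *     dice::sparse_map::sparse_map<std::string, int> word_count;
+ *     word_count["hello"] += 1;               // operator[] default-constructs missing values
+ *     word_count.insert({"world", 2});
+ *     if (auto it = word_count.find("hello"); it != word_count.end()) {
+ *         std::cout << it->first << " -> " << it->second << '\n';
+ *     }
+ * }
+ * \endcode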
+ */ + template, + typename KeyEqual = std::equal_to, + typename Allocator = std::allocator>, + growth_policy GrowthPolicy = power_of_two_growth_policy<2>, + exception_safety ExceptionSafety = exception_safety::basic, + sparsity Sparsity = sparsity::medium, + ratio MaxLoadFactor = std::ratio<1, 2>> + struct sparse_map { + private: + static constexpr bool key_equal_is_transparent = requires { + typename KeyEqual::is_transparent; + }; + + struct kv_select { + using key_type = Key; + using value_type = T; + using both_type = std::pair; + + template + static key_type const &key(std::pair const &key_value) noexcept { + return key_value.first; + } + + template + static value_type const &value(std::pair const &key_value) noexcept { + return key_value.second; + } + + template + static value_type &value(std::pair &key_value) noexcept { + return key_value.second; + } + + template + static both_type const &both(std::pair const &key_value) noexcept { + return reinterpret_cast(key_value); + } + + template + static both_type &both(std::pair &key_value) noexcept { + return reinterpret_cast(key_value); + } + }; + + using ht = internal::sparse_hash, kv_select, Hash, KeyEqual, Allocator, + GrowthPolicy, ExceptionSafety, Sparsity, probing::quadratic, MaxLoadFactor>; + + ht ht_; + + public: + using key_type = typename ht::key_type; + using mapped_type = T; + using value_type = typename ht::value_type; + using size_type = typename ht::size_type; + using difference_type = typename ht::difference_type; + using hasher = typename ht::hasher; + using key_equal = typename ht::key_equal; + using allocator_type = typename ht::allocator_type; + using reference = typename ht::reference; + using const_reference = typename ht::const_reference; + using pointer = typename ht::pointer; + using const_pointer = typename ht::const_pointer; + using iterator = typename ht::iterator; + using const_iterator = typename ht::const_iterator; + static constexpr float max_load_factor = ht::max_load_factor; + + public: + sparse_map() noexcept(ht::default_init_bucket_count == 0) : ht_{ht::default_init_bucket_count, {}, {}, {}} {} + + explicit sparse_map(size_type bucket_count, + hasher const &hash = {}, + key_equal const &equal = {}, + allocator_type const &alloc = {}) : ht_{bucket_count, hash, equal, alloc} { + } + + sparse_map(size_type bucket_count, allocator_type const &alloc) : ht_{bucket_count, {}, {}, alloc} { + } + + sparse_map(size_type bucket_count, hasher const &hash, allocator_type const &alloc) : ht_{bucket_count, hash, {}, alloc} { + } + + explicit sparse_map(allocator_type const &alloc) noexcept(ht::default_init_bucket_count == 0) : ht_{ht::default_init_bucket_count, {}, {}, alloc} { + } + + template + sparse_map(InputIt first, + InputIt last, + size_type bucket_count = ht::default_init_bucket_count, + hasher const &hash = {}, + key_equal const &equal = {}, + allocator_type const &alloc = {}) : ht_{bucket_count, hash, equal, alloc} { + insert(first, last); + } + + template + sparse_map(InputIt first, + InputIt last, + size_type bucket_count, + allocator_type const &alloc) : sparse_map{first, last, bucket_count, {}, {}, alloc} { + } + + template + sparse_map(InputIt first, + InputIt last, + size_type bucket_count, + hasher const &hash, allocator_type const &alloc) : sparse_map{first, last, bucket_count, hash, {}, alloc} { + } + + sparse_map(std::initializer_list init, + size_type bucket_count = ht::default_init_bucket_count, + hasher const &hash = {}, + key_equal const &equal = {}, + allocator_type const &alloc = {}) : 
sparse_map{init.begin(), init.end(), bucket_count, hash, equal, alloc} { + } + + sparse_map(std::initializer_list init, + size_type bucket_count, + allocator_type const &alloc) : sparse_map(init.begin(), init.end(), bucket_count, {}, {}, alloc) { + } + + sparse_map(std::initializer_list init, + size_type bucket_count, + hasher const &hash, + allocator_type const &alloc) : sparse_map{init.begin(), init.end(), bucket_count, hash, {}, alloc} { + } + + sparse_map &operator=(std::initializer_list ilist) { + ht_.clear(); + + ht_.reserve(ilist.size()); + ht_.insert(ilist.begin(), ilist.end()); + + return *this; + } + + [[nodiscard]] allocator_type get_allocator() const { return ht_.get_allocator(); } + + [[nodiscard]] iterator begin() noexcept { return ht_.begin(); } + [[nodiscard]] const_iterator begin() const noexcept { return ht_.begin(); } + [[nodiscard]] const_iterator cbegin() const noexcept { return ht_.cbegin(); } + + [[nodiscard]] iterator end() noexcept { return ht_.end(); } + [[nodiscard]] const_iterator end() const noexcept { return ht_.end(); } + [[nodiscard]] const_iterator cend() const noexcept { return ht_.cend(); } + + [[nodiscard]] bool empty() const noexcept { return ht_.empty(); } + [[nodiscard]] size_type size() const noexcept { return ht_.size(); } + [[nodiscard]] size_type max_size() const noexcept { return ht_.max_size(); } + + void clear() noexcept { + ht_.clear(); + } + + std::pair insert(value_type const &value) { + return ht_.insert(value); + } + + template requires (std::is_constructible_v) + std::pair insert(P &&value) { + return ht_.emplace(std::forward
<P>
(value)); + } + + std::pair insert(value_type &&value) { + return ht_.insert(std::move(value)); + } + + iterator insert(const_iterator hint, value_type const &value) { + return ht_.insert_hint(hint, value); + } + + template requires (std::is_constructible_v) + iterator insert(const_iterator hint, P &&value) { + return ht_.emplace_hint(hint, std::forward
<P>
(value)); + } + + iterator insert(const_iterator hint, value_type &&value) { + return ht_.insert_hint(hint, std::move(value)); + } + + template + void insert(InputIt first, InputIt last) { + ht_.insert(first, last); + } + + void insert(std::initializer_list ilist) { + ht_.insert(ilist.begin(), ilist.end()); + } + + template + std::pair insert_or_assign(key_type const &k, M &&obj) { + return ht_.insert_or_assign(k, std::forward(obj)); + } + + template + std::pair insert_or_assign(key_type &&k, M &&obj) { + return ht_.insert_or_assign(std::move(k), std::forward(obj)); + } + + template + iterator insert_or_assign(const_iterator hint, key_type const &k, M &&obj) { + return ht_.insert_or_assign(hint, k, std::forward(obj)); + } + + template + iterator insert_or_assign(const_iterator hint, key_type &&k, M &&obj) { + return ht_.insert_or_assign(hint, std::move(k), std::forward(obj)); + } + + /** + * Due to the way elements are stored, emplace will need to move or copy the + * key-value once. The method is equivalent to + * `insert(value_type(std::forward(args)...));`. + * + * Mainly here for compatibility with the `std::unordered_map` interface. + */ + template + std::pair emplace(Args &&...args) { + return ht_.emplace(std::forward(args)...); + } + + /** + * Due to the way elements are stored, emplace_hint will need to move or copy + * the key-value once. The method is equivalent to `insert(hint, + * value_type(std::forward(args)...));`. + * + * Mainly here for compatibility with the `std::unordered_map` interface. + */ + template + iterator emplace_hint(const_iterator hint, Args &&...args) { + return ht_.emplace_hint(hint, std::forward(args)...); + } + + template + std::pair try_emplace(key_type const &k, Args &&...args) { + return ht_.try_emplace(k, std::forward(args)...); + } + + template + std::pair try_emplace(key_type &&k, Args &&...args) { + return ht_.try_emplace(std::move(k), std::forward(args)...); + } + + template + iterator try_emplace(const_iterator hint, key_type const &k, Args &&...args) { + return ht_.try_emplace_hint(hint, k, std::forward(args)...); + } + + template + iterator try_emplace(const_iterator hint, key_type &&k, Args &&...args) { + return ht_.try_emplace_hint(hint, std::move(k), + std::forward(args)...); + } + + iterator erase(iterator pos) { return ht_.erase(pos); } + iterator erase(const_iterator pos) { return ht_.erase(pos); } + iterator erase(const_iterator first, const_iterator last) { + return ht_.erase(first, last); + } + size_type erase(key_type const &key) { return ht_.erase(key); } + + /** + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + size_type erase(key_type const &key, std::size_t precalculated_hash) { + return ht_.erase(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and + * comparable to `Key`. + */ + template requires (key_equal_is_transparent) + size_type erase(K const &key) { + return ht_.erase(key); + } + + /** + * @copydoc erase(const K& key) + * + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. 
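+ *
+ * For instance (sketch, with `map` and `key` standing in for an existing map and key):
+ * \code
+ * std::size_t const h = map.hash_function()(key);
+ * if (map.contains(key, h)) {
+ *     map.erase(key, h);
+ * }
+ * \endcode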
+ */ + template requires (key_equal_is_transparent) + size_type erase(K const &key, std::size_t precalculated_hash) { + return ht_.erase(key, precalculated_hash); + } + + void swap(sparse_map &other) noexcept { + other.ht_.swap(ht_); + } + + T &at(Key const &key) { return ht_.at(key); } + + /** + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + T &at(Key const &key, std::size_t precalculated_hash) { + return ht_.at(key, precalculated_hash); + } + + T const &at(Key const &key) const { return ht_.at(key); } + + /** + * @copydoc at(const Key& key, std::size_t precalculated_hash) + */ + T const &at(Key const &key, std::size_t precalculated_hash) const { + return ht_.at(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and + * comparable to `Key`. + */ + template requires (key_equal_is_transparent) + T &at(K const &key) { + return ht_.at(key); + } + + /** + * @copydoc at(const K& key) + * + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + template requires (key_equal_is_transparent) + T &at(K const &key, std::size_t precalculated_hash) { + return ht_.at(key, precalculated_hash); + } + + /** + * @copydoc at(const K& key) + */ + template requires (key_equal_is_transparent) + T const &at(K const &key) const { + return ht_.at(key); + } + + /** + * @copydoc at(const K& key, std::size_t precalculated_hash) + */ + template requires (key_equal_is_transparent) + T const &at(K const &key, std::size_t precalculated_hash) const { + return ht_.at(key, precalculated_hash); + } + + T &operator[](Key const &key) { return ht_[key]; } + T &operator[](Key &&key) { return ht_[std::move(key)]; } + + [[nodiscard]] size_type count(Key const &key) const noexcept { + return ht_.count(key); + } + + /** + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + [[nodiscard]] size_type count(Key const &key, std::size_t precalculated_hash) const noexcept { + return ht_.count(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and + * comparable to `Key`. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] size_type count(K const &key) const noexcept { + return ht_.count(key); + } + + /** + * @copydoc count(const K& key) const + * + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. 
+ */ + template requires (key_equal_is_transparent) + [[nodiscard]] size_type count(K const &key, std::size_t precalculated_hash) const noexcept { + return ht_.count(key, precalculated_hash); + } + + [[nodiscard]] iterator find(Key const &key) noexcept { + return ht_.find(key); + } + + /** + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + [[nodiscard]] iterator find(Key const &key, std::size_t precalculated_hash) noexcept { + return ht_.find(key, precalculated_hash); + } + + [[nodiscard]] const_iterator find(Key const &key) const noexcept { + return ht_.find(key); + } + + /** + * @copydoc find(const Key& key, std::size_t precalculated_hash) + */ + [[nodiscard]] const_iterator find(Key const &key, std::size_t precalculated_hash) const noexcept { + return ht_.find(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and + * comparable to `Key`. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] iterator find(K const &key) noexcept { + return ht_.find(key); + } + + /** + * @copydoc find(const K& key) + * + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] iterator find(K const &key, std::size_t precalculated_hash) noexcept { + return ht_.find(key, precalculated_hash); + } + + /** + * @copydoc find(const K& key) + */ + template requires (key_equal_is_transparent) + [[nodiscard]] const_iterator find(K const &key) const noexcept { + return ht_.find(key); + } + + /** + * @copydoc find(const K& key) + * + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] const_iterator find(K const &key, std::size_t precalculated_hash) const noexcept { + return ht_.find(key, precalculated_hash); + } + + [[nodiscard]] bool contains(Key const &key) const noexcept { + return ht_.contains(key); + } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + [[nodiscard]] bool contains(Key const &key, std::size_t precalculated_hash) const noexcept { + return ht_.contains(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] bool contains(K const &key) const noexcept { + return ht_.contains(key); + } + + /** + * @copydoc contains(const K& key) const + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. 
+ */ + template requires (key_equal_is_transparent) + [[nodiscard]] bool contains(K const &key, std::size_t precalculated_hash) const noexcept { + return ht_.contains(key, precalculated_hash); + } + + [[nodiscard]] std::pair equal_range(Key const &key) { + return ht_.equal_range(key); + } + + /** + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + [[nodiscard]] std::pair equal_range(Key const &key, + std::size_t precalculated_hash) { + return ht_.equal_range(key, precalculated_hash); + } + + [[nodiscard]] std::pair equal_range(Key const &key) const { + return ht_.equal_range(key); + } + + /** + * @copydoc equal_range(const Key& key, std::size_t precalculated_hash) + */ + [[nodiscard]] std::pair equal_range(Key const &key, + std::size_t precalculated_hash) const { + return ht_.equal_range(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and + * comparable to `Key`. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] std::pair equal_range(K const &key) { + return ht_.equal_range(key); + } + + /** + * @copydoc equal_range(const K& key) + * + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] std::pair equal_range(K const &key, + std::size_t precalculated_hash) { + return ht_.equal_range(key, precalculated_hash); + } + + /** + * @copydoc equal_range(const K& key) + */ + template requires (key_equal_is_transparent) + [[nodiscard]] std::pair equal_range(K const &key) const { + return ht_.equal_range(key); + } + + /** + * @copydoc equal_range(const K& key, std::size_t precalculated_hash) + */ + template requires (key_equal_is_transparent) + [[nodiscard]] std::pair equal_range(K const &key, + std::size_t precalculated_hash) const { + return ht_.equal_range(key, precalculated_hash); + } + + [[nodiscard]] size_type bucket_count() const noexcept { return ht_.bucket_count(); } + [[nodiscard]] size_type max_bucket_count() const noexcept { return ht_.max_bucket_count(); } + + [[nodiscard]] float load_factor() const noexcept { return ht_.load_factor(); } + + void rehash(size_type count) { ht_.rehash(count); } + void reserve(size_type count) { ht_.reserve(count); } + + [[nodiscard]] hasher hash_function() const { return ht_.hash_function(); } + [[nodiscard]] key_equal key_eq() const { return ht_.key_eq(); } + + friend bool operator==(sparse_map const &lhs, sparse_map const &rhs) { + if (lhs.size() != rhs.size()) { + return false; + } + + for (auto const &element_lhs : lhs) { + const auto it_element_rhs = rhs.find(element_lhs.first); + if (it_element_rhs == rhs.cend() || + element_lhs.second != it_element_rhs->second) { + return false; + } + } + + return true; + } + + friend bool operator!=(sparse_map const &lhs, sparse_map const &rhs) { + return !operator==(lhs, rhs); + } + + friend void swap(sparse_map &lhs, sparse_map &rhs) { lhs.swap(rhs); } + }; + + /** + * Same as `dice::sparse_map`. 
+ */ + template, + typename KeyEqual = std::equal_to, + typename Allocator = std::allocator>> + using sparse_pg_map = sparse_map; + +}// namespace dice::sparse_map + +#endif // DICE_SPARSE_MAP_SPARSE_MAP_HPP diff --git a/include/dice/sparse_map/sparse_props.hpp b/include/dice/sparse_map/sparse_props.hpp new file mode 100644 index 0000000..80b46d9 --- /dev/null +++ b/include/dice/sparse_map/sparse_props.hpp @@ -0,0 +1,33 @@ +#ifndef DICE_SPARSE_MAP_SPARSE_PROPS_HPP +#define DICE_SPARSE_MAP_SPARSE_PROPS_HPP + +#include +#include + +namespace dice::sparse_map { + enum struct probing : bool { + linear, + quadratic + }; + + enum struct exception_safety : bool { + basic, + strong + }; + + enum struct sparsity : uint8_t { + high, + medium, + low + }; + + template + concept ratio = requires { + { T::num } -> std::convertible_to; + { T::den } -> std::convertible_to; + }; + + using default_max_load_factor = std::ratio<1, 2>; +} // namespace dice::sparse_map + +#endif//DICE_SPARSE_MAP_SPARSE_PROPS_HPP diff --git a/include/dice/sparse_map/sparse_set.hpp b/include/dice/sparse_map/sparse_set.hpp new file mode 100644 index 0000000..19ad83a --- /dev/null +++ b/include/dice/sparse_map/sparse_set.hpp @@ -0,0 +1,561 @@ +/** + * MIT License + * + * Copyright (c) 2017 Thibaut Goetghebuer-Planchon + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef DICE_SPARSE_MAP_SPARSE_SET_HPP +#define DICE_SPARSE_MAP_SPARSE_SET_HPP + +#include +#include +#include +#include +#include +#include + +#include "internal/sparse_hash.hpp" + +namespace dice::sparse_map { + + /** + * Implementation of a sparse hash set using open-addressing with quadratic + * probing. The goal on the hash set is to be the most memory efficient + * possible, even at low load factor, while keeping reasonable performances. + * + * `GrowthPolicy` defines how the set grows and consequently how a hash value is + * mapped to a bucket. By default the set uses + * `dice::sh::power_of_two_growth_policy`. This policy keeps the number of + * buckets to a power of two and uses a mask to map the hash to a bucket instead + * of the slow modulo. Other growth policies are available and you may define + * your own growth policy, check `dice::sh::power_of_two_growth_policy` for the + * interface. + * + * `ExceptionSafety` defines the exception guarantee provided by the class. 
By
+ * default only the basic exception safety is guaranteed, which means that all
+ * resources used by the hash set will be freed (no memory leaks) but the hash
+ * set may end up in an undefined state if an exception is thrown (undefined
+ * here means that some elements may be missing). This can ONLY happen on rehash
+ * (either on insert or if `rehash` is called explicitly) and will occur if the
+ * Allocator can't allocate memory (`std::bad_alloc`) or if the copy constructor
+ * (when a nothrow move constructor is not available) throws an exception. This
+ * can be avoided by calling `reserve` beforehand. This basic guarantee is
+ * similar to the one of `google::sparse_hash_map` and `spp::sparse_hash_map`.
+ * It is possible to ask for the strong exception guarantee with
+ * `dice::sparse_map::exception_safety::strong`; the drawback is that the set
+ * will be slower on rehashes and will also need more memory during them.
+ *
+ * `Sparsity` defines how much the hash set will compromise between insertion
+ * speed and memory usage. A high sparsity means less memory usage but longer
+ * insertion times, and vice-versa for low sparsity. The default
+ * `dice::sparse_map::sparsity::medium` sparsity offers a good compromise. It
+ * doesn't change the lookup speed.
+ *
+ * `Key` must be nothrow move constructible and/or copy constructible.
+ *
+ * If the destructor of `Key` throws an exception, the behaviour of the class is
+ * undefined.
+ *
+ * Iterator invalidation:
+ *  - clear, operator=, reserve, rehash: always invalidate the iterators.
+ *  - insert, emplace, emplace_hint: if there is an effective insert, invalidate
+ *    the iterators.
+ *  - erase: always invalidate the iterators.
+ */
+    template<typename Key,
+             typename Hash = std::hash<Key>,
+             typename KeyEqual = std::equal_to<Key>,
+             typename Allocator = std::allocator<Key>,
+             growth_policy GrowthPolicy = power_of_two_growth_policy<2>,
+             exception_safety ExceptionSafety = exception_safety::basic,
+             sparsity Sparsity = sparsity::medium,
+             ratio MaxLoadFactor = std::ratio<1, 2>>
+    struct sparse_set {
+    private:
+        static constexpr bool key_equal_is_transparent = requires {
+            typename KeyEqual::is_transparent;
+        };
+
+        struct k_select {
+            using key_type = Key;
+            using both_type = Key const;
+
+            static key_type const &key(Key const &key) noexcept {
+                return key;
+            }
+
+            static both_type &both(Key const &key) noexcept {
+                return key;
+            }
+        };
+
+        using ht = internal::sparse_hash<Key, k_select, Hash, KeyEqual, Allocator,
+                                         GrowthPolicy, ExceptionSafety, Sparsity,
+                                         probing::quadratic, MaxLoadFactor>;
+
+        ht ht_;
+
+    public:
+        using key_type = typename ht::key_type;
+        using value_type = typename ht::value_type;
+        using size_type = typename ht::size_type;
+        using difference_type = typename ht::difference_type;
+        using hasher = typename ht::hasher;
+        using key_equal = typename ht::key_equal;
+        using allocator_type = typename ht::allocator_type;
+        using reference = typename ht::reference;
+        using const_reference = typename ht::const_reference;
+        using pointer = typename ht::pointer;
+        using const_pointer = typename ht::const_pointer;
+        using iterator = typename ht::iterator;
+        using const_iterator = typename ht::const_iterator;
+
+        sparse_set() : sparse_set(ht::default_init_bucket_count) {}
+
+        explicit sparse_set(size_type bucket_count,
+                            hasher const &hash = {},
+                            key_equal const &equal = {},
+                            allocator_type const &alloc = {}) : ht_{bucket_count, hash, equal, alloc} {
+        }
+
+        sparse_set(size_type bucket_count, allocator_type const &alloc) : sparse_set{bucket_count, {}, {}, alloc} {
+        }
+
+        sparse_set(size_type bucket_count, hasher const &hash, allocator_type const &alloc) : sparse_set{bucket_count, hash, KeyEqual(), alloc} {
+        }
+
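
// Editor's illustration (not part of the patch): the class comment above advises
// calling reserve() before bulk insertion so that no rehash -- and therefore none
// of the basic-guarantee failure paths -- can be triggered, and the precalculated
// hash overloads documented further below let callers reuse a hash they already
// computed. A minimal usage sketch, assuming the header path and API shown in this diff:

#include <cstddef>
#include <dice/sparse_map/sparse_set.hpp>

void sparse_set_usage_sketch() {
    dice::sparse_map::sparse_set<int> ids;
    ids.reserve(1024);                              // no rehash during the loop below
    for (int i = 0; i < 1000; ++i) {
        ids.insert(i);
    }

    std::size_t const h = ids.hash_function()(42);  // must equal hash_function()(key)
    bool const present = ids.contains(42, h);       // lookup reuses the precomputed hash
    (void) present;
}
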
+ explicit sparse_set(allocator_type const &alloc) : sparse_set{ht::default_init_bucket_count, alloc} { + } + + template + sparse_set(InputIt first, + InputIt last, + size_type bucket_count = ht::default_init_bucket_count, + hasher const &hash = {}, + key_equal const &equal = {}, + allocator_type const &alloc = {}) : sparse_set{bucket_count, hash, equal, alloc} { + insert(first, last); + } + + template + sparse_set(InputIt first, + InputIt last, + size_type bucket_count, + allocator_type const &alloc) : sparse_set{first, last, bucket_count, {}, {}, alloc} { + } + + template + sparse_set(InputIt first, InputIt last, + size_type bucket_count, + hasher const &hash, + allocator_type const &alloc) : sparse_set{first, last, bucket_count, hash, {}, alloc} { + } + + sparse_set(std::initializer_list init, + size_type bucket_count = ht::default_init_bucket_count, + hasher const &hash = {}, + key_equal const &equal = {}, + allocator_type const &alloc = {}) : sparse_set{init.begin(), init.end(), bucket_count, hash, equal, alloc} { + } + + sparse_set(std::initializer_list init, + size_type bucket_count, + allocator_type const &alloc) : sparse_set(init.begin(), init.end(), bucket_count, {}, {}, alloc) { + } + + sparse_set(std::initializer_list init, + size_type bucket_count, + hasher const &hash, allocator_type const &alloc) : sparse_set{init.begin(), init.end(), bucket_count, hash, {}, alloc} { + } + + sparse_set &operator=(std::initializer_list ilist) { + ht_.clear(); + + ht_.reserve(ilist.size()); + ht_.insert(ilist.begin(), ilist.end()); + + return *this; + } + + [[nodiscard]] allocator_type get_allocator() const { return ht_.get_allocator(); } + + [[nodiscard]] iterator begin() noexcept { return ht_.begin(); } + [[nodiscard]] const_iterator begin() const noexcept { return ht_.begin(); } + [[nodiscard]] const_iterator cbegin() const noexcept { return ht_.cbegin(); } + + [[nodiscard]] iterator end() noexcept { return ht_.end(); } + [[nodiscard]] const_iterator end() const noexcept { return ht_.end(); } + [[nodiscard]] const_iterator cend() const noexcept { return ht_.cend(); } + + [[nodiscard]] bool empty() const noexcept { return ht_.empty(); } + [[nodiscard]] size_type size() const noexcept { return ht_.size(); } + [[nodiscard]] size_type max_size() const noexcept { return ht_.max_size(); } + + void clear() noexcept { ht_.clear(); } + + std::pair insert(value_type const &value) { + return ht_.insert(value); + } + + std::pair insert(value_type &&value) { + return ht_.insert(std::move(value)); + } + + iterator insert(const_iterator hint, value_type const &value) { + return ht_.insert_hint(hint, value); + } + + iterator insert(const_iterator hint, value_type &&value) { + return ht_.insert_hint(hint, std::move(value)); + } + + template + void insert(InputIt first, InputIt last) { + ht_.insert(first, last); + } + + void insert(std::initializer_list ilist) { + ht_.insert(ilist.begin(), ilist.end()); + } + + /** + * Due to the way elements are stored, emplace will need to move or copy the + * key-value once. The method is equivalent to + * `insert(value_type(std::forward(args)...));`. + * + * Mainly here for compatibility with the `std::unordered_map` interface. + */ + template + std::pair emplace(Args &&...args) { + return ht_.emplace(std::forward(args)...); + } + + /** + * Due to the way elements are stored, emplace_hint will need to move or copy + * the key-value once. The method is equivalent to `insert(hint, + * value_type(std::forward(args)...));`. 
+ * + * Mainly here for compatibility with the `std::unordered_map` interface. + */ + template + iterator emplace_hint(const_iterator hint, Args &&...args) { + return ht_.emplace_hint(hint, std::forward(args)...); + } + + iterator erase(iterator pos) { return ht_.erase(pos); } + iterator erase(const_iterator pos) { return ht_.erase(pos); } + iterator erase(const_iterator first, const_iterator last) { + return ht_.erase(first, last); + } + size_type erase(key_type const &key) { return ht_.erase(key); } + + /** + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + size_type erase(key_type const &key, std::size_t precalculated_hash) { + return ht_.erase(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and + * comparable to `Key`. + */ + template requires (key_equal_is_transparent) + size_type erase(K const &key) { + return ht_.erase(key); + } + + /** + * @copydoc erase(const K& key) + * + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + template requires (key_equal_is_transparent) + size_type erase(K const &key, std::size_t precalculated_hash) { + return ht_.erase(key, precalculated_hash); + } + + void swap(sparse_set &other) noexcept { + other.ht_.swap(ht_); + } + + [[nodiscard]] size_type count(Key const &key) const noexcept { + return ht_.count(key); + } + + /** + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + [[nodiscard]] size_type count(Key const &key, std::size_t precalculated_hash) const noexcept { + return ht_.count(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and + * comparable to `Key`. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] size_type count(K const &key) const noexcept { + return ht_.count(key); + } + + /** + * @copydoc count(const K& key) const + * + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] size_type count(K const &key, std::size_t precalculated_hash) const noexcept { + return ht_.count(key, precalculated_hash); + } + + [[nodiscard]] iterator find(Key const &key) noexcept { + return ht_.find(key); + } + + /** + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. 
+ */ + [[nodiscard]] iterator find(Key const &key, std::size_t precalculated_hash) noexcept { + return ht_.find(key, precalculated_hash); + } + + [[nodiscard]] const_iterator find(Key const &key) const noexcept { + return ht_.find(key); + } + + /** + * @copydoc find(const Key& key, std::size_t precalculated_hash) + */ + [[nodiscard]] const_iterator find(Key const &key, std::size_t precalculated_hash) const noexcept { + return ht_.find(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and + * comparable to `Key`. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] iterator find(K const &key) noexcept { + return ht_.find(key); + } + + /** + * @copydoc find(const K& key) + * + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] iterator find(K const &key, std::size_t precalculated_hash) noexcept { + return ht_.find(key, precalculated_hash); + } + + /** + * @copydoc find(const K& key) + */ + template requires (key_equal_is_transparent) + [[nodiscard]] const_iterator find(K const &key) const noexcept { + return ht_.find(key); + } + + /** + * @copydoc find(const K& key) + * + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] const_iterator find(K const &key, std::size_t precalculated_hash) const noexcept { + return ht_.find(key, precalculated_hash); + } + + [[nodiscard]] bool contains(Key const &key) const noexcept { + return ht_.contains(key); + } + + /** + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + [[nodiscard]] bool contains(Key const &key, std::size_t precalculated_hash) const noexcept { + return ht_.contains(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * KeyEqual::is_transparent exists. If so, K must be hashable and comparable + * to Key. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] bool contains(K const &key) const noexcept { + return ht_.contains(key); + } + + /** + * @copydoc contains(const K& key) const + * + * Use the hash value 'precalculated_hash' instead of hashing the key. The + * hash value should be the same as hash_function()(key). Useful to speed-up + * the lookup if you already have the hash. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] bool contains(K const &key, std::size_t precalculated_hash) const noexcept { + return ht_.contains(key, precalculated_hash); + } + + [[nodiscard]] std::pair equal_range(Key const &key) noexcept { + return ht_.equal_range(key); + } + + /** + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. 
+ */ + [[nodiscard]] std::pair equal_range(Key const &key, + std::size_t precalculated_hash) noexcept { + return ht_.equal_range(key, precalculated_hash); + } + + [[nodiscard]] std::pair equal_range(Key const &key) const noexcept { + return ht_.equal_range(key); + } + + /** + * @copydoc equal_range(const Key& key, std::size_t precalculated_hash) + */ + [[nodiscard]] std::pair equal_range(Key const &key, + std::size_t precalculated_hash) const noexcept { + return ht_.equal_range(key, precalculated_hash); + } + + /** + * This overload only participates in the overload resolution if the typedef + * `KeyEqual::is_transparent` exists. If so, `K` must be hashable and + * comparable to `Key`. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] std::pair equal_range(K const &key) noexcept { + return ht_.equal_range(key); + } + + /** + * @copydoc equal_range(const K& key) + * + * Use the hash value `precalculated_hash` instead of hashing the key. The + * hash value should be the same as `hash_function()(key)`, otherwise the + * behaviour is undefined. Useful to speed-up the lookup if you already have + * the hash. + */ + template requires (key_equal_is_transparent) + [[nodiscard]] std::pair equal_range(K const &key, + std::size_t precalculated_hash) noexcept { + return ht_.equal_range(key, precalculated_hash); + } + + /** + * @copydoc equal_range(const K& key) + */ + template requires (key_equal_is_transparent) + [[nodiscard]] std::pair equal_range(K const &key) const noexcept { + return ht_.equal_range(key); + } + + /** + * @copydoc equal_range(const K& key, std::size_t precalculated_hash) + */ + template requires (key_equal_is_transparent) + [[nodiscard]] std::pair equal_range(K const &key, + std::size_t precalculated_hash) const noexcept { + return ht_.equal_range(key, precalculated_hash); + } + + [[nodiscard]] size_type bucket_count() const noexcept { return ht_.bucket_count(); } + [[nodiscard]] size_type max_bucket_count() const noexcept { return ht_.max_bucket_count(); } + + [[nodiscard]] float load_factor() const noexcept { return ht_.load_factor(); } + [[nodiscard]] float max_load_factor() const noexcept { return ht_.max_load_factor(); } + + void rehash(size_type count) { ht_.rehash(count); } + void reserve(size_type count) { ht_.reserve(count); } + + [[nodiscard]] hasher hash_function() const { return ht_.hash_function(); } + [[nodiscard]] key_equal key_eq() const { return ht_.key_eq(); } + + friend bool operator==(sparse_set const &lhs, sparse_set const &rhs) { + if (lhs.size() != rhs.size()) { + return false; + } + + for (auto const &element_lhs : lhs) { + const auto it_element_rhs = rhs.find(element_lhs); + if (it_element_rhs == rhs.cend()) { + return false; + } + } + + return true; + } + + friend bool operator!=(sparse_set const &lhs, sparse_set const &rhs) { + return !operator==(lhs, rhs); + } + + friend void swap(sparse_set &lhs, sparse_set &rhs) { lhs.swap(rhs); } + }; + + /** + * Same as `dice::sparse_set`. 
+ */ + template, + typename KeyEqual = std::equal_to, + typename Allocator = std::allocator> + using sparse_pg_set = sparse_set; + +}// namespace dice::sparse_map + +#endif diff --git a/test_package/example.cpp b/test_package/example.cpp index d8618b5..0440ef5 100644 --- a/test_package/example.cpp +++ b/test_package/example.cpp @@ -1,4 +1,4 @@ -#include +#include int main() { dice::sparse_map::sparse_map x; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index ead0538..bbb0490 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,33 +1,41 @@ -cmake_minimum_required(VERSION 3.18) +include(FetchContent) +FetchContent_Declare( + DocTest + GIT_REPOSITORY "https://github.com/doctest/doctest.git" + GIT_TAG "v2.4.11" + GIT_SHALLOW TRUE) +FetchContent_MakeAvailable(DocTest) -project(tsl_sparse_map_tests) +macro(make_test DIR NAME) + if ("${DIR}" STREQUAL ".") + set(TARGET ${NAME}) + else () + set(TARGET ${DIR}-${NAME}) + endif () -add_executable(tsl_sparse_map_tests "main.cpp" - "custom_allocator_tests.cpp" - "policy_tests.cpp" - "popcount_tests.cpp" - "sparse_map_tests.cpp" - "sparse_set_tests.cpp" - "fancy_pointer/sparse_array_tests.cpp" - "fancy_pointer/sparse_hash_map_tests.cpp" - "fancy_pointer/sparse_hash_set_tests.cpp" - "scoped_allocator_adaptor/sparse_array_tests.cpp" - "scoped_allocator_adaptor/sparse_hash_set_tests.cpp" - ) + add_executable(${TARGET} ${DIR}/${NAME}.cpp) + target_link_libraries(${TARGET} + doctest::doctest + dice-sparse-map::dice-sparse-map + Boost::headers + ) + set_property(TARGET ${TARGET} PROPERTY CXX_STANDARD 20) + add_test(NAME ${TARGET} COMMAND ${TARGET}) -target_compile_features(tsl_sparse_map_tests PRIVATE cxx_std_20) + if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") + target_compile_options(${TARGET} PRIVATE -Werror -Wall -Wextra -Wold-style-cast) + elseif(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") + target_compile_options(${TARGET} PRIVATE /bigobj /WX /W3) + endif() +endmacro () -if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") - target_compile_options(tsl_sparse_map_tests PRIVATE -Werror -Wall -Wextra -Wold-style-cast -DTSL_DEBUG -UNDEBUG) -elseif(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") - target_compile_options(tsl_sparse_map_tests PRIVATE /bigobj /WX /W3 /DTSL_DEBUG /UNDEBUG) -endif() +find_package(Boost REQUIRED COMPONENTS) -# Boost::unit_test_framework -set(Boost_USE_STATIC_LIBS ON) -find_package(Boost 1.54.0 REQUIRED COMPONENTS unit_test_framework) -target_link_libraries(tsl_sparse_map_tests PRIVATE Boost::unit_test_framework) - -# dice-sparse-map::dice-sparse-map -add_subdirectory(../ ${CMAKE_CURRENT_BINARY_DIR}/Dice/sparse-map) -target_link_libraries(tsl_sparse_map_tests PRIVATE dice-sparse-map::dice-sparse-map) +make_test(. custom_allocator_tests) +make_test(. policy_tests) +make_test(. sparse_map_tests) +make_test(. sparse_set_tests) +make_test(fancy_pointer sparse_array_tests) +make_test(fancy_pointer sparse_hash_map_tests) +make_test(fancy_pointer sparse_hash_set_tests) +make_test(scoped_allocator_adaptor sparse_hash_set_tests) diff --git a/tests/custom_allocator_tests.cpp b/tests/custom_allocator_tests.cpp index 2fc7395..99c0e01 100644 --- a/tests/custom_allocator_tests.cpp +++ b/tests/custom_allocator_tests.cpp @@ -21,85 +21,80 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#define BOOST_TEST_DYN_LINK +#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#include -#include - -#include -#include +#include #include #include #include -#include #include #include -#include "utils.h" - static std::size_t nb_custom_allocs = 0; -template +template class custom_allocator { - public: - using value_type = T; - using pointer = T*; - using const_pointer = const T*; - using reference = T&; - using const_reference = const T&; - using size_type = std::size_t; - using difference_type = std::ptrdiff_t; - using propagate_on_container_move_assignment = std::true_type; +public: + using value_type = T; + using pointer = T *; + using const_pointer = const T *; + using reference = T &; + using const_reference = const T &; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + using propagate_on_container_move_assignment = std::true_type; - template - struct rebind { - using other = custom_allocator; - }; + template + struct rebind { + using other = custom_allocator; + }; - custom_allocator() = default; + custom_allocator() = default; - template - custom_allocator(const custom_allocator&) {} + template + custom_allocator(const custom_allocator &) {} - pointer address(reference x) const noexcept { return &x; } + pointer address(reference x) const noexcept { return &x; } - const_pointer address(const_reference x) const noexcept { return &x; } + const_pointer address(const_reference x) const noexcept { return &x; } - pointer allocate(size_type n, const void* /*hint*/ = 0) { - nb_custom_allocs++; + pointer allocate(size_type n, const void * /*hint*/ = 0) { + nb_custom_allocs++; - pointer ptr = static_cast(std::malloc(n * sizeof(T))); - if (ptr == nullptr) { - throw std::bad_alloc(); - } + pointer ptr = static_cast(std::malloc(n * sizeof(T))); + if (ptr == nullptr) { + throw std::bad_alloc(); + } - return ptr; - } + return ptr; + } - void deallocate(T* p, size_type /*n*/) { std::free(p); } + void deallocate(T *p, size_type /*n*/) { std::free(p); } - size_type max_size() const noexcept { - return std::numeric_limits::max() / sizeof(value_type); - } + size_type max_size() const noexcept { + return std::numeric_limits::max() / sizeof(value_type); + } - template - void construct(U* p, Args&&... 
args) { - ::new (static_cast(p)) U(std::forward(args)...); - } + template + void construct(U *p, Args &&...args) { + ::new (static_cast(p)) U(std::forward(args)...); + } - template - void destroy(U* p) { - p->~U(); - } + template + void destroy(U *p) { + p->~U(); + } }; -template -bool operator==(const custom_allocator&, const custom_allocator&) { - return true; +template +bool operator==(const custom_allocator &, const custom_allocator &) { + return true; } -template -bool operator!=(const custom_allocator&, const custom_allocator&) { - return false; +template +bool operator!=(const custom_allocator &, const custom_allocator &) { + return false; } // TODO Avoid overloading new to check number of global new @@ -113,23 +108,21 @@ bool operator!=(const custom_allocator&, const custom_allocator&) { // std::free(ptr); // } -BOOST_AUTO_TEST_SUITE(test_custom_allocator) +TEST_SUITE("custom allocator") { + TEST_CASE("test_1") { + // nb_global_new = 0; + nb_custom_allocs = 0; -BOOST_AUTO_TEST_CASE(test_custom_allocator_1) { - // nb_global_new = 0; - nb_custom_allocs = 0; + dice::sparse_map::sparse_map, std::equal_to, + custom_allocator>> + map; - dice::sparse_map::sparse_map, std::equal_to, - custom_allocator>> - map; + const int nb_elements = 1000; + for (int i = 0; i < nb_elements; i++) { + map.insert({i, i * 2}); + } - const int nb_elements = 1000; - for (int i = 0; i < nb_elements; i++) { - map.insert({i, i * 2}); - } - - BOOST_CHECK_NE(nb_custom_allocs, 0); - // BOOST_CHECK_EQUAL(nb_global_new, 0); + CHECK_NE(nb_custom_allocs, 0); + // BOOST_CHECK_EQUAL(nb_global_new, 0); + } } - -BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/fancy_pointer/CustomAllocator.hpp b/tests/fancy_pointer/CustomAllocator.hpp index 733adb5..377844d 100644 --- a/tests/fancy_pointer/CustomAllocator.hpp +++ b/tests/fancy_pointer/CustomAllocator.hpp @@ -2,8 +2,8 @@ * @bief Home of a custom allocator for testing with fancy pointers. */ -#ifndef TSL_SPARSE_MAP_TESTS_CUSTOMALLOCATOR_HPP -#define TSL_SPARSE_MAP_TESTS_CUSTOMALLOCATOR_HPP +#ifndef DICE_SPARSE_MAP_TESTS_CUSTOMALLOCATOR_HPP +#define DICE_SPARSE_MAP_TESTS_CUSTOMALLOCATOR_HPP #include @@ -20,6 +20,7 @@ struct OffsetAllocator { using void_pointer = offset_ptr; using const_void_pointer = offset_ptr; using difference_type = typename offset_ptr::difference_type; + using is_always_equal = std::false_type; // pretend this isn't just stdalloc OffsetAllocator() noexcept = default; OffsetAllocator(OffsetAllocator const &) noexcept = default; @@ -43,4 +44,4 @@ struct OffsetAllocator { } }; -#endif //TSL_SPARSE_MAP_TESTS_CUSTOMALLOCATOR_HPP \ No newline at end of file +#endif //DICE_SPARSE_MAP_TESTS_CUSTOMALLOCATOR_HPP \ No newline at end of file diff --git a/tests/fancy_pointer/sparse_array_tests.cpp b/tests/fancy_pointer/sparse_array_tests.cpp index e243def..924d48b 100644 --- a/tests/fancy_pointer/sparse_array_tests.cpp +++ b/tests/fancy_pointer/sparse_array_tests.cpp @@ -1,39 +1,23 @@ /** @file - * @brief Checks for fancy pointer support in the sparse_array implementation. + * @brief Checks for fancy pointer support in the sparse_array_type implementation. */ -#include -#include +#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#include + +#include #include "CustomAllocator.hpp" // Globals -constexpr auto MAX_INDEX = 32; //BITMAP_NB_BITS - -/* Tests are formulated via templates to reduce code duplication. - * The template parameter contains the Allocator type and the shorthand "Array" for the sparse_array - * (with all template parameter already inserted). 
- */ - -template -void compilation() { - typename T::Array test; -} - -template -void construction() { - typename T::Allocator a; - typename T::Array test(MAX_INDEX, a); - test.clear(a); //needed because destructor asserts -} +constexpr auto MAX_INDEX = 32; //discriminant_bits namespace details { template - typename T::Array generate_test_array(typename T::Allocator &a) { - typename T::Array arr(MAX_INDEX, a); + void generate_test_array(typename T::Array &arr, typename T::Allocator &a) { + new (&arr) typename T::Array(MAX_INDEX, a); for (std::size_t i = 0; i < MAX_INDEX; ++i) { - arr.set(a, i, i); + arr.set(a, i, static_cast(i)); } - return arr; } template @@ -46,90 +30,80 @@ namespace details { } } -template -void set() { - typename T::Allocator a; - auto test = details::generate_test_array(a); - auto check = details::generate_check_for_test_array(); - //'set' did not create the correct order of items - BOOST_REQUIRE(std::equal(test.begin(), test.end(), check.begin())); - test.clear(a); //needed because destructor asserts -} - -template -void copy_construction() { - typename T::Allocator a; - //needs to be its own line, otherwise the move-construction would take place - auto test = details::generate_test_array(a); - typename T::Array copy(test, a); - auto check = details::generate_check_for_test_array(); - //'copy' changed the order of the items - BOOST_REQUIRE(std::equal(copy.begin(), copy.end(), check.begin())); - test.clear(a); - copy.clear(a); -} - -template -void move_construction() { - typename T::Allocator a; - //two lines needed. Otherwise move/copy elision - auto moved_from = details::generate_test_array(a); - typename T::Array moved_to(std::move(moved_from)); - auto check = details::generate_check_for_test_array(); - //'move' changed the order of the items - BOOST_REQUIRE(std::equal(moved_to.begin(), moved_to.end(), check.begin())); - moved_to.clear(a); -} - -template -void const_iterator() { - typename T::Allocator a; - auto test = details::generate_test_array(a); - auto const_iter = test.cbegin(); - //const iterator has the wrong type - BOOST_REQUIRE((std::is_same::value)); - test.clear(a); -} - - -/* - * This are the types you can give the tests as template parameters. - */ -template +template struct STD { using Allocator = std::allocator; - using Array = dice::sparse_map::detail_sparse_hash::sparse_array, Sparsity>; + using Array = dice::sparse_map::internal::sparse_bucket, Sparsity>; using Const_Iterator = T const*; + using Value_Type = T; }; -template +template struct CUSTOM { using Allocator = OffsetAllocator; - using Array = dice::sparse_map::detail_sparse_hash::sparse_array, Sparsity>; + using Array = dice::sparse_map::internal::sparse_bucket, Sparsity>; using Const_Iterator = boost::interprocess::offset_ptr; + using Value_Type = T; }; - - -/* The instantiation of the tests. - * I don't use the boost template test cases because with this I can set the title of every test case myself. 
- */ -BOOST_AUTO_TEST_SUITE(fancy_pointers) -BOOST_AUTO_TEST_SUITE(sparse_array_tests) - -BOOST_AUTO_TEST_CASE(std_alloc_compile) {compilation>();} -BOOST_AUTO_TEST_CASE(std_alloc_construction) {construction>();} -BOOST_AUTO_TEST_CASE(std_alloc_set) {set>();} -BOOST_AUTO_TEST_CASE(std_alloc_copy_construction) {copy_construction>();} -BOOST_AUTO_TEST_CASE(std_alloc_move_construction) {move_construction>();} -BOOST_AUTO_TEST_CASE(std_const_iterator) {const_iterator>();} - -BOOST_AUTO_TEST_CASE(custom_alloc_compile) {compilation>();} -BOOST_AUTO_TEST_CASE(custom_alloc_construction) {construction>();} -BOOST_AUTO_TEST_CASE(custom_alloc_set) {set>();} -BOOST_AUTO_TEST_CASE(custom_alloc_copy_construction) {copy_construction>();} -BOOST_AUTO_TEST_CASE(custom_alloc_move_construction) {move_construction>();} -BOOST_AUTO_TEST_CASE(custom_const_iterator) {const_iterator>();} - -BOOST_AUTO_TEST_SUITE_END() -BOOST_AUTO_TEST_SUITE_END() +#define TEST_ARRAYS STD, CUSTOM + +TEST_SUITE("sparse array with fancy pointers") { + TEST_CASE_TEMPLATE("compile", T, TEST_ARRAYS) { + typename T::Array test; + (void) test; + } + + TEST_CASE_TEMPLATE("construction", T, TEST_ARRAYS) { + typename T::Allocator a; + typename T::Array test(MAX_INDEX, a); + test.clear(a); //needed because destructor asserts + } + + TEST_CASE_TEMPLATE("set", T, TEST_ARRAYS) { + typename T::Allocator a; + typename T::Array test; + details::generate_test_array(test, a); + auto check = details::generate_check_for_test_array(); + //'set' did not create the correct order of items + REQUIRE(std::equal(test.begin(), test.end(), check.begin())); + test.clear(a); //needed because destructor asserts + } + + TEST_CASE_TEMPLATE("copy ctor", T, TEST_ARRAYS) { + typename T::Allocator a; + typename T::Array test; + details::generate_test_array(test, a); + typename T::Array copy(test, a); + auto check = details::generate_check_for_test_array(); + //'copy' changed the order of the items + REQUIRE(std::equal(copy.begin(), copy.end(), check.begin())); + test.clear(a); + copy.clear(a); + } + + TEST_CASE_TEMPLATE("move ctor", T, CUSTOM) { + typename T::Allocator a; + typename T::Array moved_from; + //two lines needed. Otherwise move/copy elision + details::generate_test_array(moved_from, a); + + // calling ctor indended for uses when allocator differs between moved_from and moved_to + // so need to clean up moved_from afterwards + typename T::Array moved_to(std::move(moved_from), a); + auto check = details::generate_check_for_test_array(); + //'move' changed the order of the items + REQUIRE(std::equal(moved_to.begin(), moved_to.end(), check.begin())); + moved_to.clear(a); + } + + TEST_CASE_TEMPLATE("const iterator", T, TEST_ARRAYS) { + typename T::Allocator a; + typename T::Array test; + details::generate_test_array(test, a); + auto const_iter = test.cbegin(); + //const iterator has the wrong type + REQUIRE((std::is_same::value)); + test.clear(a); + } +} diff --git a/tests/fancy_pointer/sparse_hash_map_tests.cpp b/tests/fancy_pointer/sparse_hash_map_tests.cpp index c3a97fc..6a2321d 100644 --- a/tests/fancy_pointer/sparse_hash_map_tests.cpp +++ b/tests/fancy_pointer/sparse_hash_map_tests.cpp @@ -2,10 +2,12 @@ * @brief Checks for fancy pointer support in the sparse_hash implementation for pair values (maps). */ +#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#include + #include -#include -#include -#include +#include +#include #include "CustomAllocator.hpp" /* Tests are analogous to the tests in sparse_array_tests.cpp. 
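
// Editor's illustration (not part of the patch): the test files above replace
// Boost.Test's templated test cases with doctest. TEST_CASE_TEMPLATE compiles and
// runs its body once per type listed after the placeholder, which is why the ports
// collect their allocator configurations in TEST_ARRAYS/TEST_MAPS macros. A minimal
// self-contained example of the same pattern:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#define EXAMPLE_TYPES int, long, double

TEST_CASE_TEMPLATE("value-initialised instances compare equal", T, EXAMPLE_TYPES) {
    T const a{};   // the body is instantiated separately for int, long and double
    T const b{};
    CHECK(a == b);
}
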
@@ -13,43 +15,54 @@ */ namespace details { template - struct KeySelect { + struct KeyValueSelect { using key_type = Key; - const key_type &operator()(std::pair const &key_value) const noexcept { - return key_value.first; - } - key_type &operator()(std::pair &key_value) noexcept { - return key_value.first; - } + using value_type = T; + using both_type = std::pair; + + template + static key_type const &key(std::pair const &key_value) noexcept { + return key_value.first; + } + + template + static value_type const &value(std::pair const &key_value) noexcept { + return key_value.second; + } + + template + static value_type &value(std::pair &key_value) noexcept { + return key_value.second; + } + + template + static both_type const &both(std::pair const &key_value) noexcept { + return reinterpret_cast(key_value); + } + + template + static both_type &both(std::pair &key_value) noexcept { + return reinterpret_cast(key_value); + } }; - template - struct ValueSelect { - using value_type = T; - const value_type &operator()(std::pair const &key_value) const noexcept { - return key_value.second; - } - value_type &operator()(std::pair &key_value) noexcept { - return key_value.second; - } - }; template - using sparse_map= dice::sparse_map::detail_sparse_hash::sparse_hash< - std::pair, KeySelect, ValueSelect, std::hash, std::equal_to, Alloc, - dice::sparse_map::sh::power_of_two_growth_policy<2>, - dice::sparse_map::sh::exception_safety::basic, - dice::sparse_map::sh::sparsity::medium, - dice::sparse_map::sh::probing::quadratic>; + using sparse_map = dice::sparse_map::internal::sparse_hash< + std::pair, KeyValueSelect, std::hash, std::equal_to, Alloc, + dice::sparse_map::power_of_two_growth_policy<2>, + dice::sparse_map::exception_safety::basic, + dice::sparse_map::sparsity::medium, + dice::sparse_map::probing::quadratic, + dice::sparse_map::default_max_load_factor>; template typename T::Map default_construct_map() { using Key = typename T::key_type; - return typename T::Map(T::Map::DEFAULT_INIT_BUCKET_COUNT, + return typename T::Map(T::Map::default_init_bucket_count, std::hash(), std::equal_to(), - typename T::Allocator(), - T::Map::DEFAULT_MAX_LOAD_FACTOR); + typename T::Allocator()); } /** Checks if all values of the map are in the initializer_list and than if the lengths are equal. 
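
// Editor's illustration (not part of the patch): the trailing template argument of
// the internal::sparse_hash alias above is the compile-time maximum load factor
// introduced in sparse_props.hpp. Any type exposing num/den members accepted by the
// `ratio` concept works there; std::ratio specialisations do out of the box
// (the header path is assumed from this diff):

#include <ratio>
#include <dice/sparse_map/sparse_props.hpp>

static_assert(dice::sparse_map::ratio<dice::sparse_map::default_max_load_factor>);
static_assert(dice::sparse_map::ratio<std::ratio<3, 4>>);  // e.g. a 3/4 maximum load factor
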
@@ -72,62 +85,6 @@ namespace details { } } -template -void construction() { - auto map = details::default_construct_map(); -} - -template -void insert(std::initializer_list l) { - auto map = details::default_construct_map(); - for (auto dataPair : l) map.insert(dataPair); - //'insert' did not create exactly the values needed - BOOST_REQUIRE(details::is_equal(map, l)); -} - -template -void iterator_insert(std::initializer_list l) { - auto map = details::default_construct_map(); - map.insert(l.begin(), l.end()); - //'insert' with iterators did not create exactly the values needed - BOOST_REQUIRE(details::is_equal(map, l)); -} - -template -void iterator_access(typename T::value_type single_value) { - auto map = details::default_construct_map(); - map.insert(single_value); - //iterator cannot access single value - BOOST_REQUIRE( (*(map.begin()) == single_value)); -} - -template -void iterator_access_multi(std::initializer_list l) { - auto map = details::default_construct_map(); - map.insert(l.begin(), l.end()); - std::vector l_sorted = l; - std::vector map_sorted(map.begin(), map.end()); - std::sort(l_sorted.begin(), l_sorted.end()); - std::sort(map_sorted.begin(), map_sorted.end()); - //iterating over the map didn't work - BOOST_REQUIRE(std::equal(l_sorted.begin(), l_sorted.end(), - map_sorted.begin())); -} - -template -void value(std::initializer_list l, typename T::value_type to_change) { - auto map = details::default_construct_map(); - map.insert(l.begin(), l.end()); - map[to_change.first] = to_change.second; - - std::unordered_map check(l.begin(), l.end()); - check[to_change.first] = to_change.second; - - //changing a single value didn't work - BOOST_REQUIRE(details::is_equal(map, check)); -} - - template struct STD { using key_type = Key; @@ -145,38 +102,83 @@ struct CUSTOM { }; -BOOST_AUTO_TEST_SUITE(fancy_pointers) -BOOST_AUTO_TEST_SUITE(sparse_hash_map_tests) - -BOOST_AUTO_TEST_CASE(std_alloc_compiles) {construction>();} -BOOST_AUTO_TEST_CASE(std_alloc_insert) {insert>({{1,2},{3,4},{5,6}});} -BOOST_AUTO_TEST_CASE(std_alloc_iterator_insert) {insert>({{1,2},{3,4},{5,6}});} -BOOST_AUTO_TEST_CASE(std_alloc_iterator_access) {iterator_access>({1,42});} -BOOST_AUTO_TEST_CASE(std_alloc_iterator_access_multi) {iterator_access_multi>({{1,2},{3,4},{5,6}});} -BOOST_AUTO_TEST_CASE(std_alloc_value) {value>({{1,2},{3,4},{5,6}}, {1, 42});} - -BOOST_AUTO_TEST_CASE(custom_alloc_compiles) {construction>();} -BOOST_AUTO_TEST_CASE(custom_alloc_insert) {insert>({{1,2},{3,4},{5,6}});} -BOOST_AUTO_TEST_CASE(custom_alloc_iterator_insert) {insert>({{1,2},{3,4},{5,6}});} -BOOST_AUTO_TEST_CASE(custom_alloc_iterator_access) {iterator_access>({1,42});} -BOOST_AUTO_TEST_CASE(custom_alloc_iterator_access_multi) {iterator_access_multi>({{1,2},{3,4},{5,6}});} -BOOST_AUTO_TEST_CASE(custom_alloc_value) {value>({{1,2},{3,4},{5,6}}, {1, 42});} - -BOOST_AUTO_TEST_CASE(full_map) { - dice::sparse_map::sparse_map, std::equal_to, OffsetAllocator>> map; - std::vector> data = { - {0,1},{2,3},{4,5},{6,7},{8,9} - }; - map.insert(data.begin(), data.end()); - auto check = [&map](std::pair p) { - if (!map.contains(p.first)) return false; - return map.at(p.first) == p.second; - }; - //size did not match - BOOST_REQUIRE(data.size() == map.size()); - //map did not contain all values - BOOST_REQUIRE(std::all_of(data.begin(), data.end(), check)); +#define TEST_MAPS STD, CUSTOM + +TEST_SUITE("sparse map with fancy pointers") { + TEST_CASE_TEMPLATE("construction", T, TEST_MAPS) { + auto map = details::default_construct_map(); + } + + 
TEST_CASE_TEMPLATE("insert", T, TEST_MAPS) { + std::initializer_list l{{1,2},{3,4},{5,6}}; + + auto map = details::default_construct_map(); + for (auto dataPair : l) map.insert(dataPair); + //'insert' did not create exactly the values needed + REQUIRE(details::is_equal(map, l)); + } + + TEST_CASE_TEMPLATE("iter insert", T, TEST_MAPS) { + std::initializer_list l{{1,2},{3,4},{5,6}}; + + auto map = details::default_construct_map(); + map.insert(l.begin(), l.end()); + //'insert' with iterators did not create exactly the values needed + REQUIRE(details::is_equal(map, l)); + } + + TEST_CASE_TEMPLATE("iter access", T, TEST_MAPS) { + typename T::value_type single_value{1,42}; + + auto map = details::default_construct_map(); + map.insert(single_value); + //iterator cannot access single value + REQUIRE((*map.begin()).first == single_value.first); + REQUIRE((*map.begin()).second == single_value.second); + } + + TEST_CASE_TEMPLATE("iter access multi", T, TEST_MAPS) { + std::initializer_list l{{1,2},{3,4},{5,6}}; + + auto map = details::default_construct_map(); + map.insert(l.begin(), l.end()); + std::vector l_sorted = l; + std::vector map_sorted(map.begin(), map.end()); + std::sort(l_sorted.begin(), l_sorted.end()); + std::sort(map_sorted.begin(), map_sorted.end()); + //iterating over the map didn't work + REQUIRE(std::equal(l_sorted.begin(), l_sorted.end(), + map_sorted.begin())); + } + + TEST_CASE_TEMPLATE("value", T, TEST_MAPS) { + typename T::value_type to_change{1, 42}; + std::initializer_list l{{1,2},{3,4},{5,6}}; + + auto map = details::default_construct_map(); + map.insert(l.begin(), l.end()); + map[to_change.first] = to_change.second; + + std::unordered_map check(l.begin(), l.end()); + check[to_change.first] = to_change.second; + + //changing a single value didn't work + REQUIRE(details::is_equal(map, check)); + } + + TEST_CASE("full map") { + dice::sparse_map::sparse_map, std::equal_to, OffsetAllocator>> map; + std::vector> data = { + {0,1},{2,3},{4,5},{6,7},{8,9} + }; + map.insert(data.begin(), data.end()); + auto check = [&map](std::pair p) { + if (!map.contains(p.first)) return false; + return map.at(p.first) == p.second; + }; + //size did not match + REQUIRE(data.size() == map.size()); + //map did not contain all values + REQUIRE(std::all_of(data.begin(), data.end(), check)); + } } - -BOOST_AUTO_TEST_SUITE_END() -BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/fancy_pointer/sparse_hash_set_tests.cpp b/tests/fancy_pointer/sparse_hash_set_tests.cpp index d0a9127..c7c7b86 100644 --- a/tests/fancy_pointer/sparse_hash_set_tests.cpp +++ b/tests/fancy_pointer/sparse_hash_set_tests.cpp @@ -2,10 +2,11 @@ * @brief Checks for fancy pointer support in the sparse_hash implementation for single values (sets). */ -#include -#include -#include -#include +#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#include + +#include +#include #include "CustomAllocator.hpp" /* Tests are analogous to the tests in sparse_array_tests.cpp. 
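
// Editor's illustration (not part of the patch): "fancy pointer" in these test files
// means the allocator's pointer type is a class type -- here
// boost::interprocess::offset_ptr -- rather than a raw T*. The repository's own
// OffsetAllocator lives in CustomAllocator.hpp; the sketch below is a separate,
// hypothetical minimal allocator that shows only the idea:

#include <cstddef>
#include <cstdlib>
#include <new>
#include <boost/interprocess/offset_ptr.hpp>

template<typename T>
struct mini_offset_allocator {
    using value_type = T;
    // a class-type ("fancy") pointer instead of T*
    using pointer = boost::interprocess::offset_ptr<T>;

    mini_offset_allocator() noexcept = default;
    template<typename U>
    mini_offset_allocator(mini_offset_allocator<U> const &) noexcept {}

    pointer allocate(std::size_t n) {
        void *mem = std::malloc(n * sizeof(T));
        if (mem == nullptr) {
            throw std::bad_alloc{};
        }
        return pointer{static_cast<T *>(mem)};
    }

    void deallocate(pointer p, std::size_t /*n*/) {
        std::free(p.get());  // offset_ptr::get() recovers the raw pointer
    }
};

template<typename T, typename U>
bool operator==(mini_offset_allocator<T> const &, mini_offset_allocator<U> const &) { return true; }
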
@@ -15,23 +16,24 @@ namespace details { template struct KeySelect { using key_type = Key; - const key_type &operator()(Key const &key) const noexcept { return key; } - key_type &operator()(Key &key) noexcept { return key; } + using both_type = Key const; + + static key_type const &both(Key const &key) noexcept { return key; } + static key_type const &key(Key const &key) noexcept { return key; } }; template - using sparse_set = dice::sparse_map::detail_sparse_hash::sparse_hash< - T, KeySelect, void, std::hash, std::equal_to, Alloc, - dice::sparse_map::sh::power_of_two_growth_policy<2>, - dice::sparse_map::sh::exception_safety::basic, - dice::sparse_map::sh::sparsity::medium, - dice::sparse_map::sh::probing::quadratic>; - - template - typename T::Set default_construct_set() { - using Type = typename T::value_type; - return typename T::Set(T::Set::DEFAULT_INIT_BUCKET_COUNT, std::hash(), std::equal_to(), - typename T::Allocator(), T::Set::DEFAULT_MAX_LOAD_FACTOR); + using sparse_set = dice::sparse_map::internal::sparse_hash< + T, KeySelect, std::hash, std::equal_to, Alloc, + dice::sparse_map::power_of_two_growth_policy<2>, + dice::sparse_map::exception_safety::basic, + dice::sparse_map::sparsity::medium, + dice::sparse_map::probing::quadratic, + dice::sparse_map::default_max_load_factor>; + + template + Set default_construct_set() { + return Set{Set::default_init_bucket_count, {}, {}, {}}; } /** checks if all values of the set are in the initializer_list and than if the lengths are equal. @@ -45,99 +47,6 @@ namespace details { } } -template -void construction() { - auto set = details::default_construct_set(); -} - -template -void insert(std::initializer_list l) { - auto set = details::default_construct_set(); - for (auto const& i: l) set.insert(i); - //'insert' did not create exactly the values needed - BOOST_REQUIRE(details::is_equal(set, l)); -} - -template -void iterator_insert(std::initializer_list l) { - auto set = details::default_construct_set(); - set.insert(l.begin(), l.end()); - //'insert' with iterators did not create exactly the values needed - BOOST_REQUIRE(details::is_equal(set, l)); -} - -template -void iterator_access(typename T::value_type single_value) { - auto set = details::default_construct_set(); - set.insert(single_value); - //iterator cannot access single value - BOOST_REQUIRE(*(set.begin()) == single_value); -} - -template -void iterator_access_multi(std::initializer_list l) { - auto set = details::default_construct_set(); - set.insert(l.begin(), l.end()); - std::vector l_sorted = l; - std::vector set_sorted(set.begin(), set.end()); - std::sort(l_sorted.begin(), l_sorted.end()); - std::sort(set_sorted.begin(), set_sorted.end()); - //iterating over the set didn't work - BOOST_REQUIRE(std::equal(l_sorted.begin(), l_sorted.end(), - set_sorted.begin())); -} - - -template -void const_iterator_access_multi(std::initializer_list l) { - auto set = details::default_construct_set(); - set.insert(l.begin(), l.end()); - std::vector l_sorted = l; - std::vector set_sorted(set.cbegin(), set.cend()); - std::sort(l_sorted.begin(), l_sorted.end()); - std::sort(set_sorted.begin(), set_sorted.end()); - //const iterating over the set didn't work - BOOST_REQUIRE(std::equal(l_sorted.begin(), l_sorted.end(), - set_sorted.begin())); -} - -template -void find(std::initializer_list l, typename T::value_type search_value, bool is_in_list) { - auto set = details::default_construct_set(); - set.insert(l.begin(), l.end()); - auto iter = set.find(search_value); - bool found = iter != set.end(); - 
//find did not work as expected - BOOST_REQUIRE((found == is_in_list)); -} - -template -void erase(std::initializer_list l, typename T::value_type extra_value) { - auto set = details::default_construct_set(); - set.insert(extra_value); - set.insert(l.begin(), l.end()); - // force non-const iterator - auto iter = set.begin(); - for(; *iter != extra_value; ++iter); - set.erase(iter); - //erase did not work as expected - BOOST_REQUIRE(details::is_equal(set, l)); -} - -template -void erase_with_const_iter(std::initializer_list l, typename T::value_type extra_value) { - auto set = details::default_construct_set(); - set.insert(extra_value); - set.insert(l.begin(), l.end()); - //force const iterator - auto iter = set.cbegin(); - for(; *iter != extra_value; ++iter); - set.erase(iter); - //erase did not work as expected - BOOST_REQUIRE(details::is_equal(set, l)); -} - - template struct STD { using value_type = T; @@ -145,49 +54,124 @@ struct STD { using Set = details::sparse_set; }; -template -struct CUSTOM { - using value_type = T; - using Allocator = OffsetAllocator; - using Set = details::sparse_set; -}; - -BOOST_AUTO_TEST_SUITE(fancy_pointers) -BOOST_AUTO_TEST_SUITE(sparse_hash_set_tests) - -BOOST_AUTO_TEST_CASE(std_alloc_compiles) {construction>();} -BOOST_AUTO_TEST_CASE(std_alloc_insert) {insert>({1,2,3,4});} -BOOST_AUTO_TEST_CASE(std_alloc_iterator_insert) {iterator_insert>({1,2,3,4});} -BOOST_AUTO_TEST_CASE(std_alloc_iterator_access) {iterator_access>(42);} -BOOST_AUTO_TEST_CASE(std_alloc_iterator_access_multi) {iterator_access_multi>({1,2,3,4});} -BOOST_AUTO_TEST_CASE(std_alloc_const_iterator_access_multi) {const_iterator_access_multi>({1,2,3,4});} -BOOST_AUTO_TEST_CASE(std_find_true) {find>({1,2,3,4}, 4, true);} -BOOST_AUTO_TEST_CASE(std_find_false) {find>({1,2,3,4}, 5, false);} -BOOST_AUTO_TEST_CASE(std_erase) {erase>({1,2,3,4}, 5);} -BOOST_AUTO_TEST_CASE(std_erase_with_const_iter) {erase_with_const_iter>({1,2,3,4}, 5);} - -BOOST_AUTO_TEST_CASE(custom_alloc_compiles) {construction>();} -BOOST_AUTO_TEST_CASE(custom_alloc_insert) {insert>({1,2,3,4});} -BOOST_AUTO_TEST_CASE(custom_alloc_iterator_insert) {iterator_insert>({1,2,3,4});} -BOOST_AUTO_TEST_CASE(custom_alloc_iterator_access) {iterator_access>(42);} -BOOST_AUTO_TEST_CASE(custom_alloc_iterator_access_multi) {iterator_access_multi>({1,2,3,4});} -BOOST_AUTO_TEST_CASE(custom_alloc_const_iterator_access_multi) {const_iterator_access_multi>({1,2,3,4});} -BOOST_AUTO_TEST_CASE(custom_find_true) {find>({1,2,3,4}, 4, true);} -BOOST_AUTO_TEST_CASE(custom_find_false) {find>({1,2,3,4}, 5, false);} -BOOST_AUTO_TEST_CASE(custom_erase) {erase>({1,2,3,4}, 5);} -BOOST_AUTO_TEST_CASE(custom_erase_with_const_iter) {erase_with_const_iter>({1,2,3,4}, 5);} - -BOOST_AUTO_TEST_CASE(full_set) { - dice::sparse_map::sparse_set, std::equal_to, OffsetAllocator> set; - std::vector data = {1,2,3,4,5,6,7,8,9}; - set.insert(data.begin(), data.end()); - auto check = [&set](int d) {return set.contains(d);}; - //size did not match - BOOST_REQUIRE(data.size() == set.size()); - //Set did not contain all values - BOOST_REQUIRE(std::all_of(data.begin(), data.end(), check)); +#define TEST_TYPES details::sparse_set>, \ + details::sparse_set> + +TEST_SUITE("sparse set with fancy pointers") { + TEST_CASE_TEMPLATE("construction", T, TEST_TYPES) { + auto set = details::default_construct_set(); + } + + TEST_CASE_TEMPLATE("insert", T, TEST_TYPES) { + std::initializer_list l{1,2,3,4}; + + auto set = details::default_construct_set(); + for (auto const& i: l) set.insert(i); + 
//'insert' did not create exactly the values needed + REQUIRE(details::is_equal(set, l)); + } + + TEST_CASE_TEMPLATE("iter insert", T, TEST_TYPES) { + std::initializer_list l{1,2,3,4}; + + auto set = details::default_construct_set(); + set.insert(l.begin(), l.end()); + //'insert' with iterators did not create exactly the values needed + REQUIRE(details::is_equal(set, l)); + } + + TEST_CASE_TEMPLATE("iter access", T, TEST_TYPES) { + typename T::value_type single_value = 42; + + auto set = details::default_construct_set(); + set.insert(single_value); + //iterator cannot access single value + REQUIRE(*(set.begin()) == single_value); + } + + TEST_CASE_TEMPLATE("iter access multi", T, TEST_TYPES) { + std::initializer_list l{1,2,3,4}; + + auto set = details::default_construct_set(); + set.insert(l.begin(), l.end()); + std::vector l_sorted = l; + std::vector set_sorted(set.begin(), set.end()); + std::sort(l_sorted.begin(), l_sorted.end()); + std::sort(set_sorted.begin(), set_sorted.end()); + //iterating over the set didn't work + REQUIRE(std::equal(l_sorted.begin(), l_sorted.end(), + set_sorted.begin())); + } + + TEST_CASE_TEMPLATE("const iter access multi", T, TEST_TYPES) { + std::initializer_list l{1,2,3,4}; + + auto set = details::default_construct_set(); + set.insert(l.begin(), l.end()); + std::vector l_sorted = l; + std::vector set_sorted(set.cbegin(), set.cend()); + std::sort(l_sorted.begin(), l_sorted.end()); + std::sort(set_sorted.begin(), set_sorted.end()); + //const iterating over the set didn't work + REQUIRE(std::equal(l_sorted.begin(), l_sorted.end(), + set_sorted.begin())); + } + + TEST_CASE_TEMPLATE("find", T, TEST_TYPES) { + std::initializer_list l{1,2,3,4}; + + auto set = details::default_construct_set(); + set.insert(l.begin(), l.end()); + + SUBCASE("exists") { + auto iter = set.find(4); + REQUIRE(iter != set.end()); + } + + SUBCASE("not exists") { + auto iter = set.find(5); + REQUIRE(iter == set.end()); + } + } + + TEST_CASE_TEMPLATE("erase", T, TEST_TYPES) { + std::initializer_list l{1,2,3,4}; + typename T::value_type extra_value = 5; + + SUBCASE("iter") { + auto set = details::default_construct_set(); + set.insert(extra_value); + set.insert(l.begin(), l.end()); + // force non-const iterator + auto iter = set.begin(); + for(; *iter != extra_value; ++iter); + set.erase(iter); + //erase did not work as expected + REQUIRE(details::is_equal(set, l)); + } + + SUBCASE("const iter") { + auto set = details::default_construct_set(); + set.insert(extra_value); + set.insert(l.begin(), l.end()); + //force const iterator + auto iter = set.cbegin(); + for(; *iter != extra_value; ++iter); + set.erase(iter); + //erase did not work as expected + REQUIRE(details::is_equal(set, l)); + } + } + + TEST_CASE("full set") { + dice::sparse_map::sparse_set, std::equal_to, OffsetAllocator> set; + std::vector data = {1,2,3,4,5,6,7,8,9}; + set.insert(data.begin(), data.end()); + auto check = [&set](int d) {return set.contains(d);}; + //size did not match + REQUIRE(data.size() == set.size()); + //Set did not contain all values + REQUIRE(std::all_of(data.begin(), data.end(), check)); + } } - -BOOST_AUTO_TEST_SUITE_END() -BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/main.cpp b/tests/main.cpp deleted file mode 100644 index fa1e24c..0000000 --- a/tests/main.cpp +++ /dev/null @@ -1,26 +0,0 @@ -/** - * MIT License - * - * Copyright (c) 2017 Thibaut Goetghebuer-Planchon - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files 
(the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#define BOOST_TEST_MODULE sparse_map_tests - -#include diff --git a/tests/policy_tests.cpp b/tests/policy_tests.cpp index 74996c7..fc7acde 100644 --- a/tests/policy_tests.cpp +++ b/tests/policy_tests.cpp @@ -21,74 +21,71 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#include +#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#include + +#include -#include -#include -#include #include #include #include -BOOST_AUTO_TEST_SUITE(test_policy) +#define TEST_POLICIES dice::sparse_map::power_of_two_growth_policy<2>, \ + dice::sparse_map::power_of_two_growth_policy<4>, \ + dice::sparse_map::prime_growth_policy, \ + dice::sparse_map::mod_growth_policy<>, \ + dice::sparse_map::mod_growth_policy> -using test_types = - boost::mpl::list, - dice::sparse_map::sh::power_of_two_growth_policy<4>, - dice::sparse_map::sh::prime_growth_policy, dice::sparse_map::sh::mod_growth_policy<>, - dice::sparse_map::sh::mod_growth_policy>>; +TEST_SUITE("policies") { + TEST_CASE_TEMPLATE("test policy", Policy, TEST_POLICIES) { -BOOST_AUTO_TEST_CASE_TEMPLATE(test_policy, Policy, test_types) { - // Call next_bucket_count() on the policy until we reach its - // max_bucket_count() - bool exception_thrown = false; + // Call next_bucket_count() on the policy until we reach its + // max_bucket_count() + bool exception_thrown = false; - std::size_t bucket_count = 0; - Policy policy(bucket_count); + std::size_t bucket_count = 0; + Policy policy(bucket_count); - BOOST_CHECK_EQUAL(policy.bucket_for_hash(0), 0); - BOOST_CHECK_EQUAL(bucket_count, 0); + CHECK_EQ(policy.bucket_for_hash(0), 0); + CHECK_EQ(bucket_count, 0); - try { - while (true) { - const std::size_t previous_bucket_count = bucket_count; + try { + while (true) { + const std::size_t previous_bucket_count = bucket_count; - bucket_count = policy.next_bucket_count(); - policy = Policy(bucket_count); + bucket_count = policy.next_bucket_count(); + policy = Policy(bucket_count); - BOOST_CHECK_EQUAL(policy.bucket_for_hash(0), 0); - BOOST_CHECK(bucket_count > previous_bucket_count); - } - } catch (const std::length_error&) { - exception_thrown = true; - } + CHECK_EQ(policy.bucket_for_hash(0), 0); + CHECK(bucket_count > previous_bucket_count); + } + } catch (const std::length_error&) { + exception_thrown = true; + } - BOOST_CHECK(exception_thrown); -} + CHECK(exception_thrown); + } -BOOST_AUTO_TEST_CASE_TEMPLATE(test_policy_min_bucket_count, Policy, - test_types) { - // Check policy when a bucket_count of 0 is asked. 
- std::size_t bucket_count = 0; - Policy policy(bucket_count); + TEST_CASE_TEMPLATE("min bucket count", Policy, TEST_POLICIES) { + // Check policy when a bucket_count of 0 is asked. + std::size_t bucket_count = 0; + Policy policy(bucket_count); - BOOST_CHECK_EQUAL(policy.bucket_for_hash(0), 0); -} + CHECK_EQ(policy.bucket_for_hash(0), 0); + } -BOOST_AUTO_TEST_CASE_TEMPLATE(test_policy_max_bucket_count, Policy, - test_types) { - // Test a bucket_count equals to the max_bucket_count limit and above - std::size_t bucket_count = 0; - Policy policy(bucket_count); + TEST_CASE_TEMPLATE("max bucket count", Policy, TEST_POLICIES) { + // Test a bucket_count equals to the max_bucket_count limit and above + std::size_t bucket_count = 0; + Policy policy(bucket_count); - bucket_count = policy.max_bucket_count(); - Policy policy2(bucket_count); + bucket_count = policy.max_bucket_count(); + Policy policy2(bucket_count); - bucket_count = std::numeric_limits::max(); - BOOST_CHECK_THROW((Policy(bucket_count)), std::length_error); + bucket_count = std::numeric_limits::max(); + CHECK_THROWS_AS((Policy(bucket_count)), std::length_error); - bucket_count = policy.max_bucket_count() + 1; - BOOST_CHECK_THROW((Policy(bucket_count)), std::length_error); + bucket_count = policy.max_bucket_count() + 1; + CHECK_THROWS_AS((Policy(bucket_count)), std::length_error); + } } - -BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/popcount_tests.cpp b/tests/popcount_tests.cpp deleted file mode 100644 index 84e2851..0000000 --- a/tests/popcount_tests.cpp +++ /dev/null @@ -1,106 +0,0 @@ -/** - * MIT License - * - * Copyright (c) 2017 Thibaut Goetghebuer-Planchon - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include - -#include -#include -#include - -BOOST_AUTO_TEST_SUITE(test_popcount) - -BOOST_AUTO_TEST_CASE(test_popcount_1) { - std::uint32_t value = 0; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::popcount(value), 0); - - value = 1; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::popcount(value), 1); - - value = 2; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::popcount(value), 1); - - value = 294967496; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::popcount(value), 12); - - value = std::numeric_limits::max(); - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::popcount(value), 32); -} - -BOOST_AUTO_TEST_CASE(test_popcountll_1) { - std::uint64_t value = 0; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::popcountll(value), 0); - - value = 1; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::popcountll(value), 1); - - value = 2; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::popcountll(value), 1); - - value = 294967496; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::popcountll(value), 12); - - value = 8446744073709551416ull; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::popcountll(value), 40); - - value = std::numeric_limits::max(); - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::popcountll(value), 64); -} - -BOOST_AUTO_TEST_CASE(test_fallback_popcount_1) { - std::uint32_t value = 0; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::fallback_popcount(value), 0); - - value = 1; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::fallback_popcount(value), 1); - - value = 2; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::fallback_popcount(value), 1); - - value = 294967496; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::fallback_popcount(value), 12); - - value = std::numeric_limits::max(); - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::fallback_popcount(value), 32); -} - -BOOST_AUTO_TEST_CASE(test_fallback_popcountll_1) { - std::uint64_t value = 0; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::fallback_popcountll(value), 0); - - value = 1; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::fallback_popcountll(value), 1); - - value = 2; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::fallback_popcountll(value), 1); - - value = 294967496; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::fallback_popcountll(value), 12); - - value = 8446744073709551416ull; - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::fallback_popcountll(value), 40); - - value = std::numeric_limits::max(); - BOOST_CHECK_EQUAL(dice::sparse_map::detail_popcount::fallback_popcountll(value), 64); -} - -BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/scoped_allocator_adaptor/sparse_array_tests.cpp b/tests/scoped_allocator_adaptor/sparse_array_tests.cpp deleted file mode 100644 index bd7b45a..0000000 --- a/tests/scoped_allocator_adaptor/sparse_array_tests.cpp +++ /dev/null @@ -1,129 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -// Globals -constexpr auto MAX_INDEX = 32; // BITMAP_NB_BITS - -template void compilation() { typename T::Array test; } - -template void construction() { - typename T::Allocator a; - typename T::Array test(MAX_INDEX, a); - test.clear(a); -} - -template -void set(std::initializer_list l) { - typename T::Allocator a; - typename T::Array array(MAX_INDEX, a); - std::vector check; - check.reserve(l.size()); - std::size_t counter = 0; - for (auto const &value : l) { - array.set(a, counter++, value); - check.emplace_back(value); - } - 
//'set' did not create the correct order of items - BOOST_REQUIRE(std::equal(array.begin(), array.end(), check.begin())); - array.clear(a); -} - -template void uses_allocator() { - //uses_allocator returns false - BOOST_REQUIRE((std::uses_allocator::value)); -} - -template -void trailing_allocator_convention(Args...) { - using Alloc = typename T::Allocator; - //trailing_allocator thinks construction is not possible - BOOST_REQUIRE((std::is_constructible::value)); -} - -template void trailing_allocator_convention_without_parameters() { - using Alloc = typename std::allocator_traits< - typename T::Allocator>::template rebind_alloc; - //trailing_allocator thinks construction is not possible - BOOST_REQUIRE((std::is_constructible::value)); -} - -template -void is_move_insertable(std::initializer_list l) { - using A = typename std::allocator_traits< - typename T::Allocator>::template rebind_alloc; - A m; - auto p = std::allocator_traits::allocate(m, 1); - typename T::Allocator ArrayAlloc; - typename T::Array rv(MAX_INDEX, ArrayAlloc); - std::size_t counter = 0; - for (auto const &value : l) { - rv.set(ArrayAlloc, counter++, value); - } - std::allocator_traits::construct(m, p, std::move(rv)); - rv.clear(ArrayAlloc); - p->clear(ArrayAlloc); - std::allocator_traits::destroy(m, p); - std::allocator_traits::deallocate(m, p, 1); -} - -template void is_default_insertable() { - using A = typename std::allocator_traits< - typename T::Allocator>::template rebind_alloc; - A m; - typename T::Array *p = std::allocator_traits::allocate(m, 1); - std::allocator_traits::construct(m, p); - std::allocator_traits::deallocate(m, p, 1); -} - -template -struct NORMAL { - using value_type = T; - using Allocator = std::allocator; - using Array = dice::sparse_map::detail_sparse_hash::sparse_array; -}; - -template -struct SCOPED { - using value_type = T; - using Allocator = std::scoped_allocator_adaptor>; - using Array = dice::sparse_map::detail_sparse_hash::sparse_array; -}; - -BOOST_AUTO_TEST_SUITE(scoped_allocators) -BOOST_AUTO_TEST_SUITE(sparse_array_tests) - -BOOST_AUTO_TEST_CASE(normal_compilation) { compilation>(); } -BOOST_AUTO_TEST_CASE(normal_construction) { construction>(); } -BOOST_AUTO_TEST_CASE(normal_set) { set>({0, 1, 2, 3, 4}); } -BOOST_AUTO_TEST_CASE(normal_uses_allocator) { uses_allocator>(); } -BOOST_AUTO_TEST_CASE(normal_trailing_allocator_convention) { - trailing_allocator_convention>(0); -} -BOOST_AUTO_TEST_CASE(normal_is_move_insertable) { - is_move_insertable>({0, 1, 2, 3, 4, 5}); -} -BOOST_AUTO_TEST_CASE(normal_is_default_insertable) { - is_default_insertable>(); -} - -BOOST_AUTO_TEST_CASE(scoped_compilation) { compilation>(); } -BOOST_AUTO_TEST_CASE(scoped_construction) { construction>(); } -BOOST_AUTO_TEST_CASE(scoped_set) { set>({0, 1, 2, 3, 4}); } -BOOST_AUTO_TEST_CASE(scoped_uses_allocator) { uses_allocator>(); } -BOOST_AUTO_TEST_CASE(scoped_trailing_allocator_convention) { - trailing_allocator_convention>(0); -} -BOOST_AUTO_TEST_CASE(scoped_is_move_insertable) { - is_move_insertable>({0, 1, 2, 3, 4, 5}); -} -BOOST_AUTO_TEST_CASE(scoped_is_default_insertable) { - is_default_insertable>(); -} - -BOOST_AUTO_TEST_SUITE_END() -BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/scoped_allocator_adaptor/sparse_hash_set_tests.cpp b/tests/scoped_allocator_adaptor/sparse_hash_set_tests.cpp index ca3bdef..d078f2a 100644 --- a/tests/scoped_allocator_adaptor/sparse_hash_set_tests.cpp +++ b/tests/scoped_allocator_adaptor/sparse_hash_set_tests.cpp @@ -1,5 +1,7 @@ -#include -#include +#define 
DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#include + +#include #include namespace details { @@ -9,43 +11,54 @@ template struct KeySelect { key_type &operator()(Key &key) noexcept { return key; } }; +template +struct Hash { + std::size_t operator()(std::vector const &vec) const noexcept { + std::hash h; + std::size_t ret; + + for (auto const &e : vec) { + ret ^= h(e); + } + + return ret; + } +}; + template -using sparse_set = dice::sparse_map::detail_sparse_hash::sparse_hash< - T, details::KeySelect, void, std::hash, std::equal_to, Alloc, - dice::sparse_map::sh::power_of_two_growth_policy<2>, - dice::sparse_map::sh::exception_safety::basic, - dice::sparse_map::sh::sparsity::medium, - dice::sparse_map::sh::probing::quadratic>; +using sparse_set = dice::sparse_map::internal::sparse_hash< + T, details::KeySelect, Hash, std::equal_to, Alloc, + dice::sparse_map::power_of_two_growth_policy<2>, + dice::sparse_map::exception_safety::basic, + dice::sparse_map::sparsity::medium, + dice::sparse_map::probing::quadratic, + dice::sparse_map::default_max_load_factor>; + } // namespace details template void construction() { using Type = typename T::value_type; - typename T::Set(T::Set::DEFAULT_INIT_BUCKET_COUNT, std::hash(), - std::equal_to(), typename T::Allocator(), - T::Set::DEFAULT_MAX_LOAD_FACTOR); + typename T::Set(T::Set::default_init_bucket_count, details::Hash(), + std::equal_to(), typename T::Allocator()); } template struct NORMAL { - using value_type = T; - using Allocator = std::allocator; - using Set = details::sparse_set; + using value_type = std::vector; + using Allocator = std::allocator; + using Set = details::sparse_set; }; template struct SCOPED { - using value_type = T; - using Allocator = std::scoped_allocator_adaptor>; - using Set = details::sparse_set; + using value_type = std::vector; + using Allocator = std::scoped_allocator_adaptor, std::allocator>; + using Set = details::sparse_set; }; -BOOST_AUTO_TEST_SUITE(scoped_allocators) -BOOST_AUTO_TEST_SUITE(sparse_hash_set_tests) - -BOOST_AUTO_TEST_CASE(normal_construction){construction>();} +TEST_SUITE("sparse set with scoped allocator") { + TEST_CASE("normal construction"){construction>();} -BOOST_AUTO_TEST_CASE(scoped_construction){construction>();} - -BOOST_AUTO_TEST_SUITE_END() -BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file + TEST_CASE("scoped construction"){construction>();} +} diff --git a/tests/sparse_map_tests.cpp b/tests/sparse_map_tests.cpp index 3994e3d..c53d550 100644 --- a/tests/sparse_map_tests.cpp +++ b/tests/sparse_map_tests.cpp @@ -21,14 +21,12 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#include +#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#include + +#include -#include -#include -#include -#include #include -#include #include #include #include @@ -36,1320 +34,1232 @@ #include #include #include +#include #include "utils.h" -BOOST_AUTO_TEST_SUITE(test_sparse_map) - -using test_types = boost::mpl::list< - dice::sparse_map::sparse_map, - dice::sparse_map::sparse_map, - // Test with hash having a lot of collisions - dice::sparse_map::sparse_map>, - dice::sparse_map::sparse_map>, - dice::sparse_map::sparse_map>, - dice::sparse_map::sparse_map>, - dice::sparse_map::sparse_map>, - - // Others GrowthPolicy - dice::sparse_map::sparse_map, - std::equal_to, - std::allocator>, - dice::sparse_map::sh::power_of_two_growth_policy<4>>, - dice::sparse_map::sparse_pg_map>, - dice::sparse_map::sparse_map, - std::equal_to, - std::allocator>, - dice::sparse_map::sh::mod_growth_policy<>>, - - dice::sparse_map::sparse_map, - std::equal_to, - std::allocator>, - dice::sparse_map::sh::power_of_two_growth_policy<4>>, - dice::sparse_map::sparse_pg_map>, - dice::sparse_map::sparse_map, - std::equal_to, - std::allocator>, - dice::sparse_map::sh::mod_growth_policy<>>, - - // Strong exception guarantee - dice::sparse_map::sparse_map, - std::equal_to, - std::allocator>, - dice::sparse_map::sh::power_of_two_growth_policy<2>, - dice::sparse_map::sh::exception_safety::strong>, - - // Others sparsity - dice::sparse_map::sparse_map, - std::equal_to, - std::allocator>, - dice::sparse_map::sh::power_of_two_growth_policy<2>, - dice::sparse_map::sh::exception_safety::basic, dice::sparse_map::sh::sparsity::high>, - dice::sparse_map::sparse_map, - std::equal_to, - std::allocator>, - dice::sparse_map::sh::power_of_two_growth_policy<2>, - dice::sparse_map::sh::exception_safety::basic, dice::sparse_map::sh::sparsity::low>>; - -/** - * insert - */ -BOOST_AUTO_TEST_CASE_TEMPLATE(test_insert, HMap, test_types) { - // insert x values, insert them again, check values - using key_t = typename HMap::key_type; - using value_t = typename HMap::mapped_type; - - const std::size_t nb_values = 1000; - HMap map(0); - BOOST_CHECK_EQUAL(map.bucket_count(), 0); - - typename HMap::iterator it; - bool inserted; - - for (std::size_t i = 0; i < nb_values; i++) { - std::tie(it, inserted) = - map.insert({utils::get_key(i), utils::get_value(i)}); - - BOOST_CHECK_EQUAL(it->first, utils::get_key(i)); - BOOST_CHECK_EQUAL(it->second, utils::get_value(i)); - BOOST_CHECK(inserted); - } - BOOST_CHECK_EQUAL(map.size(), nb_values); - - for (std::size_t i = 0; i < nb_values; i++) { - std::tie(it, inserted) = map.insert( - {utils::get_key(i), utils::get_value(i + 1)}); - - BOOST_CHECK_EQUAL(it->first, utils::get_key(i)); - BOOST_CHECK_EQUAL(it->second, utils::get_value(i)); - BOOST_CHECK(!inserted); - } - - for (std::size_t i = 0; i < nb_values; i++) { - it = map.find(utils::get_key(i)); - - BOOST_CHECK_EQUAL(it->first, utils::get_key(i)); - BOOST_CHECK_EQUAL(it->second, utils::get_value(i)); - } -} - -BOOST_AUTO_TEST_CASE(test_range_insert) { - // create a vector of values to insert, insert part of them in the - // map, check values - const int nb_values = 1000; - std::vector> values_to_insert(nb_values); - for (int i = 0; i < nb_values; i++) { - values_to_insert[i] = std::make_pair(i, i + 1); - } - - dice::sparse_map::sparse_map map = {{-1, 1}, {-2, 2}}; - map.insert(std::next(values_to_insert.begin(), 10), - values_to_insert.end() - 5); - - BOOST_CHECK_EQUAL(map.size(), 987); - - BOOST_CHECK_EQUAL(map[-1], 1); - BOOST_CHECK_EQUAL(map[-2], 2); - 
- for (int i = 10; i < nb_values - 5; i++) { - BOOST_CHECK_EQUAL(map[i], i + 1); - } -} - -BOOST_AUTO_TEST_CASE(test_insert_with_hint) { - dice::sparse_map::sparse_map map{{1, 0}, {2, 1}, {3, 2}}; - - // Wrong hint - BOOST_CHECK(map.insert(map.find(2), std::make_pair(3, 4)) == map.find(3)); - - // Good hint - BOOST_CHECK(map.insert(map.find(2), std::make_pair(2, 4)) == map.find(2)); - - // end() hint - BOOST_CHECK(map.insert(map.find(10), std::make_pair(2, 4)) == map.find(2)); - - BOOST_CHECK_EQUAL(map.size(), 3); - - // end() hint, new value - BOOST_CHECK_EQUAL(map.insert(map.find(10), std::make_pair(4, 3))->first, 4); - - // Wrong hint, new value - BOOST_CHECK_EQUAL(map.insert(map.find(2), std::make_pair(5, 4))->first, 5); - - BOOST_CHECK_EQUAL(map.size(), 5); -} - -/** +#define TEST_MAPS dice::sparse_map::sparse_map, \ + dice::sparse_map::sparse_map, \ + dice::sparse_map::sparse_map>, \ + dice::sparse_map::sparse_map>, \ + dice::sparse_map::sparse_map>, \ + dice::sparse_map::sparse_map>, \ + dice::sparse_map::sparse_map>, \ + \ + dice::sparse_map::sparse_map, \ + std::equal_to, \ + std::allocator>, \ + dice::sparse_map::power_of_two_growth_policy<4>>, \ + dice::sparse_map::sparse_pg_map>, \ + dice::sparse_map::sparse_map, \ + std::equal_to, \ + std::allocator>, \ + dice::sparse_map::mod_growth_policy<>>, \ + \ + dice::sparse_map::sparse_map, \ + std::equal_to, \ + std::allocator>, \ + dice::sparse_map::power_of_two_growth_policy<4>>, \ + dice::sparse_map::sparse_pg_map>, \ + dice::sparse_map::sparse_map, \ + std::equal_to, \ + std::allocator>, \ + dice::sparse_map::mod_growth_policy<>>, \ + \ + \ + dice::sparse_map::sparse_map, \ + std::equal_to, \ + std::allocator>, \ + dice::sparse_map::power_of_two_growth_policy<2>, \ + dice::sparse_map::exception_safety::strong>, \ + \ + \ + dice::sparse_map::sparse_map, \ + std::equal_to, \ + std::allocator>, \ + dice::sparse_map::power_of_two_growth_policy<2>, \ + dice::sparse_map::exception_safety::basic, \ + dice::sparse_map::sparsity::high>, \ + dice::sparse_map::sparse_map, \ + std::equal_to, \ + std::allocator>, \ + dice::sparse_map::power_of_two_growth_policy<2>, \ + dice::sparse_map::exception_safety::basic, dice::sparse_map::sparsity::low> + +TEST_SUITE("sparse map") { + + /** + * insert + */ + TEST_CASE_TEMPLATE("insert", HMap, TEST_MAPS) { + // insert x values, insert them again, check values + using key_t = typename HMap::key_type; + using value_t = typename HMap::mapped_type; + + const std::size_t nb_values = 1000; + HMap map(0); + CHECK_EQ(map.bucket_count(), 0); + + + for (std::size_t i = 0; i < nb_values; i++) { + auto [it, inserted] = map.insert({utils::get_key(i), utils::get_value(i)}); + + CHECK_EQ(it->first, utils::get_key(i)); + CHECK_EQ(it->second, utils::get_value(i)); + CHECK(inserted); + } + CHECK_EQ(map.size(), nb_values); + + for (std::size_t i = 0; i < nb_values; i++) { + auto [it, inserted] = map.insert({utils::get_key(i), utils::get_value(i + 1)}); + + CHECK_EQ(it->first, utils::get_key(i)); + CHECK_EQ(it->second, utils::get_value(i)); + CHECK(!inserted); + } + + for (std::size_t i = 0; i < nb_values; i++) { + auto it = map.find(utils::get_key(i)); + + CHECK_EQ(it->first, utils::get_key(i)); + CHECK_EQ(it->second, utils::get_value(i)); + } + } + + TEST_CASE("range insert") { + // create a vector of values to insert, insert part of them in the + // map, check values + const int nb_values = 1000; + std::vector> values_to_insert(nb_values); + for (int i = 0; i < nb_values; i++) { + values_to_insert[i] = std::make_pair(i, 
i + 1); + } + + dice::sparse_map::sparse_map map = {{-1, 1}, {-2, 2}}; + map.insert(std::next(values_to_insert.begin(), 10), + values_to_insert.end() - 5); + + CHECK_EQ(map.size(), 987); + + CHECK_EQ(map[-1], 1); + CHECK_EQ(map[-2], 2); + + for (int i = 10; i < nb_values - 5; i++) { + CHECK_EQ(map[i], i + 1); + } + } + + TEST_CASE("insert with hint") { + dice::sparse_map::sparse_map map{{1, 0}, {2, 1}, {3, 2}}; + + // Wrong hint + CHECK_EQ(map.insert(map.find(2), std::make_pair(3, 4)), map.find(3)); + + // Good hint + CHECK_EQ(map.insert(map.find(2), std::make_pair(2, 4)), map.find(2)); + + // end() hint + CHECK_EQ(map.insert(map.find(10), std::make_pair(2, 4)), map.find(2)); + + CHECK_EQ(map.size(), 3); + + // end() hint, new value + CHECK_EQ(map.insert(map.find(10), std::make_pair(4, 3))->first, 4); + + // Wrong hint, new value + CHECK_EQ(map.insert(map.find(2), std::make_pair(5, 4))->first, 5); + + CHECK_EQ(map.size(), 5); + } + + /** * emplace_hint */ -BOOST_AUTO_TEST_CASE(test_emplace_hint) { - dice::sparse_map::sparse_map map{{1, 0}, {2, 1}, {3, 2}}; - - // Wrong hint - BOOST_CHECK(map.emplace_hint(map.find(2), std::piecewise_construct, - std::forward_as_tuple(3), - std::forward_as_tuple(4)) == map.find(3)); - - // Good hint - BOOST_CHECK(map.emplace_hint(map.find(2), std::piecewise_construct, - std::forward_as_tuple(2), - std::forward_as_tuple(4)) == map.find(2)); - - // end() hint - BOOST_CHECK(map.emplace_hint(map.find(10), std::piecewise_construct, - std::forward_as_tuple(2), - std::forward_as_tuple(4)) == map.find(2)); - - BOOST_CHECK_EQUAL(map.size(), 3); - - // end() hint, new value - BOOST_CHECK_EQUAL( - map.emplace_hint(map.find(10), std::piecewise_construct, - std::forward_as_tuple(4), std::forward_as_tuple(3)) - ->first, - 4); - - // Wrong hint, new value - BOOST_CHECK_EQUAL( - map.emplace_hint(map.find(2), std::piecewise_construct, - std::forward_as_tuple(5), std::forward_as_tuple(4)) - ->first, - 5); - - BOOST_CHECK_EQUAL(map.size(), 5); -} - -/** - * emplace - */ -BOOST_AUTO_TEST_CASE(test_emplace) { - dice::sparse_map::sparse_map map; - dice::sparse_map::sparse_map::iterator it; - bool inserted; - - std::tie(it, inserted) = - map.emplace(std::piecewise_construct, std::forward_as_tuple(10), - std::forward_as_tuple(1)); - BOOST_CHECK_EQUAL(it->first, 10); - BOOST_CHECK_EQUAL(it->second, move_only_test(1)); - BOOST_CHECK(inserted); - - std::tie(it, inserted) = - map.emplace(std::piecewise_construct, std::forward_as_tuple(10), - std::forward_as_tuple(3)); - BOOST_CHECK_EQUAL(it->first, 10); - BOOST_CHECK_EQUAL(it->second, move_only_test(1)); - BOOST_CHECK(!inserted); -} - -/** - * try_emplace - */ -BOOST_AUTO_TEST_CASE(test_try_emplace) { - dice::sparse_map::sparse_map map; - dice::sparse_map::sparse_map::iterator it; - bool inserted; - - std::tie(it, inserted) = map.try_emplace(10, 1); - BOOST_CHECK_EQUAL(it->first, 10); - BOOST_CHECK_EQUAL(it->second, move_only_test(1)); - BOOST_CHECK(inserted); - - std::tie(it, inserted) = map.try_emplace(10, 3); - BOOST_CHECK_EQUAL(it->first, 10); - BOOST_CHECK_EQUAL(it->second, move_only_test(1)); - BOOST_CHECK(!inserted); -} - -BOOST_AUTO_TEST_CASE(test_try_emplace_2) { - // Insert x values with try_emplace, insert them again, check with find. 
- dice::sparse_map::sparse_map map; - dice::sparse_map::sparse_map::iterator it; - bool inserted; - - const std::size_t nb_values = 1000; - for (std::size_t i = 0; i < nb_values; i++) { - std::tie(it, inserted) = map.try_emplace(utils::get_key(i), i); - - BOOST_CHECK_EQUAL(it->first, utils::get_key(i)); - BOOST_CHECK_EQUAL(it->second, move_only_test(i)); - BOOST_CHECK(inserted); - } - BOOST_CHECK_EQUAL(map.size(), nb_values); - - for (std::size_t i = 0; i < nb_values; i++) { - std::tie(it, inserted) = - map.try_emplace(utils::get_key(i), i + 1); - - BOOST_CHECK_EQUAL(it->first, utils::get_key(i)); - BOOST_CHECK_EQUAL(it->second, move_only_test(i)); - BOOST_CHECK(!inserted); - } - - for (std::size_t i = 0; i < nb_values; i++) { - it = map.find(utils::get_key(i)); - - BOOST_CHECK_EQUAL(it->first, utils::get_key(i)); - BOOST_CHECK_EQUAL(it->second, move_only_test(i)); - } -} - -BOOST_AUTO_TEST_CASE(test_try_emplace_hint) { - dice::sparse_map::sparse_map map(0); - - // end() hint, new value - auto it = map.try_emplace(map.find(10), 10, 1); - BOOST_CHECK_EQUAL(it->first, 10); - BOOST_CHECK_EQUAL(it->second, move_only_test(1)); - - // Good hint - it = map.try_emplace(map.find(10), 10, 3); - BOOST_CHECK_EQUAL(it->first, 10); - BOOST_CHECK_EQUAL(it->second, move_only_test(1)); - - // Wrong hint, new value - it = map.try_emplace(map.find(10), 1, 3); - BOOST_CHECK_EQUAL(it->first, 1); - BOOST_CHECK_EQUAL(it->second, move_only_test(3)); -} - -/** - * insert_or_assign - */ -BOOST_AUTO_TEST_CASE(test_insert_or_assign) { - dice::sparse_map::sparse_map map; - dice::sparse_map::sparse_map::iterator it; - bool inserted; - - std::tie(it, inserted) = map.insert_or_assign(10, move_only_test(1)); - BOOST_CHECK_EQUAL(it->first, 10); - BOOST_CHECK_EQUAL(it->second, move_only_test(1)); - BOOST_CHECK(inserted); - - std::tie(it, inserted) = map.insert_or_assign(10, move_only_test(3)); - BOOST_CHECK_EQUAL(it->first, 10); - BOOST_CHECK_EQUAL(it->second, move_only_test(3)); - BOOST_CHECK(!inserted); -} - -BOOST_AUTO_TEST_CASE(test_insert_or_assign_hint) { - dice::sparse_map::sparse_map map(0); - - // end() hint, new value - auto it = map.insert_or_assign(map.find(10), 10, move_only_test(1)); - BOOST_CHECK_EQUAL(it->first, 10); - BOOST_CHECK_EQUAL(it->second, move_only_test(1)); - - // Good hint - it = map.insert_or_assign(map.find(10), 10, move_only_test(3)); - BOOST_CHECK_EQUAL(it->first, 10); - BOOST_CHECK_EQUAL(it->second, move_only_test(3)); - - // Bad hint, new value - it = map.insert_or_assign(map.find(10), 1, move_only_test(3)); - BOOST_CHECK_EQUAL(it->first, 1); - BOOST_CHECK_EQUAL(it->second, move_only_test(3)); -} - -/** - * erase - */ -BOOST_AUTO_TEST_CASE(test_range_erase_all) { - // insert x values, delete all - using HMap = dice::sparse_map::sparse_map; - - const std::size_t nb_values = 1000; - HMap map = utils::get_filled_hash_map(nb_values); - - auto it = map.erase(map.begin(), map.end()); - BOOST_CHECK(it == map.end()); - BOOST_CHECK(map.empty()); -} - -BOOST_AUTO_TEST_CASE(test_range_erase) { - // insert x values, delete all except 10 first and 780 last values - using HMap = dice::sparse_map::sparse_map; - - const std::size_t nb_values = 1000; - HMap map = utils::get_filled_hash_map(nb_values); - - auto it_first = std::next(map.begin(), 10); - auto it_last = std::next(map.begin(), 220); - - auto it = map.erase(it_first, it_last); - BOOST_CHECK_EQUAL(std::distance(it, map.end()), 780); - BOOST_CHECK_EQUAL(map.size(), 790); - BOOST_CHECK_EQUAL(std::distance(map.begin(), map.end()), 790); - - for (auto& 
val : map) { - BOOST_CHECK_EQUAL(map.count(val.first), 1); - } -} - -BOOST_AUTO_TEST_CASE_TEMPLATE(test_erase_loop, HMap, test_types) { - // insert x values, delete all one by one with iterator - std::size_t nb_values = 1000; - - HMap map = utils::get_filled_hash_map(nb_values); - HMap map2 = utils::get_filled_hash_map(nb_values); - - auto it = map.begin(); - // Use second map to check for key after delete as we may not copy the key - // with move-only types. - auto it2 = map2.begin(); - while (it != map.end()) { - it = map.erase(it); - --nb_values; - - BOOST_CHECK_EQUAL(map.count(it2->first), 0); - BOOST_CHECK_EQUAL(map.size(), nb_values); - ++it2; - } - - BOOST_CHECK(map.empty()); -} - -BOOST_AUTO_TEST_CASE_TEMPLATE(test_erase_loop_range, HMap, test_types) { - // insert x values, delete all five by five with iterators - const std::size_t hop = 5; - std::size_t nb_values = 1000; - - BOOST_REQUIRE_EQUAL(nb_values % hop, 0); - - HMap map = utils::get_filled_hash_map(nb_values); - - auto it = map.begin(); - while (it != map.end()) { - it = map.erase(it, std::next(it, hop)); - nb_values -= hop; - - BOOST_CHECK_EQUAL(map.size(), nb_values); - } - - BOOST_CHECK(map.empty()); -} - -BOOST_AUTO_TEST_CASE_TEMPLATE(test_insert_erase_insert, HMap, test_types) { - // insert x/2 values, delete x/4 values, insert x/2 values, find each value - using key_t = typename HMap::key_type; - using value_t = typename HMap::mapped_type; - - const std::size_t nb_values = 2000; - HMap map(10); - typename HMap::iterator it; - bool inserted; - - // Insert nb_values/2 - for (std::size_t i = 0; i < nb_values / 2; i++) { - std::tie(it, inserted) = - map.insert({utils::get_key(i), utils::get_value(i)}); - - BOOST_CHECK_EQUAL(it->first, utils::get_key(i)); - BOOST_CHECK_EQUAL(it->second, utils::get_value(i)); - BOOST_CHECK(inserted); - } - BOOST_CHECK_EQUAL(map.size(), nb_values / 2); - - // Delete nb_values/4 - for (std::size_t i = 0; i < nb_values / 2; i++) { - if (i % 2 == 0) { - BOOST_CHECK_EQUAL(map.erase(utils::get_key(i)), 1); - } - } - BOOST_CHECK_EQUAL(map.size(), nb_values / 4); - - // Insert nb_values/2 - for (std::size_t i = nb_values / 2; i < nb_values; i++) { - std::tie(it, inserted) = - map.insert({utils::get_key(i), utils::get_value(i)}); - - BOOST_CHECK_EQUAL(it->first, utils::get_key(i)); - BOOST_CHECK_EQUAL(it->second, utils::get_value(i)); - BOOST_CHECK(inserted); - } - BOOST_CHECK_EQUAL(map.size(), nb_values - nb_values / 4); - - // Find - for (std::size_t i = 0; i < nb_values; i++) { - if (i % 2 == 0 && i < nb_values / 2) { - it = map.find(utils::get_key(i)); - - BOOST_CHECK(it == map.end()); - } else { - it = map.find(utils::get_key(i)); - - BOOST_REQUIRE(it != map.end()); - BOOST_CHECK_EQUAL(it->first, utils::get_key(i)); - BOOST_CHECK_EQUAL(it->second, utils::get_value(i)); - } - } -} - -BOOST_AUTO_TEST_CASE(test_range_erase_same_iterators) { - // insert x values, test erase with same iterator as each parameter, check if - // returned mutable iterator is valid. 
- const std::size_t nb_values = 100; - auto map = - utils::get_filled_hash_map>( - nb_values); - - dice::sparse_map::sparse_map::const_iterator it_const = - map.cbegin(); - std::advance(it_const, 10); - - dice::sparse_map::sparse_map::iterator it_mutable = - map.erase(it_const, it_const); - BOOST_CHECK(it_const == it_mutable); - BOOST_CHECK(map.mutable_iterator(it_const) == it_mutable); - BOOST_CHECK_EQUAL(map.size(), 100); - - it_mutable.value() = -100; - BOOST_CHECK_EQUAL(it_const.value(), -100); -} - -/** - * rehash - */ -BOOST_AUTO_TEST_CASE(test_rehash_empty) { - // test rehash(0), test find/erase/insert on map. - const std::size_t nb_values = 100; - auto map = - utils::get_filled_hash_map>( - nb_values); - - const std::size_t bucket_count = map.bucket_count(); - BOOST_CHECK(bucket_count >= nb_values); - - map.clear(); - BOOST_CHECK_EQUAL(map.bucket_count(), bucket_count); - BOOST_CHECK(map.empty()); - - map.rehash(0); - BOOST_CHECK_EQUAL(map.bucket_count(), 0); - BOOST_CHECK(map.empty()); - - BOOST_CHECK(map.find(1) == map.end()); - BOOST_CHECK_EQUAL(map.erase(1), 0); - BOOST_CHECK(map.insert({1, 10}).second); - BOOST_CHECK_EQUAL(map.at(1), 10); -} - -/** + TEST_CASE("emplace hint") { + dice::sparse_map::sparse_map map{{1, 0}, {2, 1}, {3, 2}}; + + // Wrong hint + CHECK_EQ(map.emplace_hint(map.find(2), std::piecewise_construct, + std::forward_as_tuple(3), + std::forward_as_tuple(4)), + map.find(3)); + + // Good hint + CHECK_EQ(map.emplace_hint(map.find(2), std::piecewise_construct, + std::forward_as_tuple(2), + std::forward_as_tuple(4)), + map.find(2)); + + // end() hint + CHECK_EQ(map.emplace_hint(map.find(10), std::piecewise_construct, + std::forward_as_tuple(2), + std::forward_as_tuple(4)), + map.find(2)); + + CHECK_EQ(map.size(), 3); + + // end() hint, new value + CHECK_EQ(map.emplace_hint(map.find(10), std::piecewise_construct, + std::forward_as_tuple(4), std::forward_as_tuple(3))->first, + 4); + + // Wrong hint, new value + CHECK_EQ(map.emplace_hint(map.find(2), std::piecewise_construct, + std::forward_as_tuple(5), std::forward_as_tuple(4))->first, + 5); + + CHECK_EQ(map.size(), 5); + } + + TEST_CASE("emplace") { + dice::sparse_map::sparse_map map; + + auto [it, inserted] = map.emplace(std::piecewise_construct, + std::forward_as_tuple(10), + std::forward_as_tuple(1)); + CHECK_EQ(it->first, 10); + CHECK_EQ(it->second, move_only_test(1)); + CHECK(inserted); + + std::tie(it, inserted) = map.emplace(std::piecewise_construct, + std::forward_as_tuple(10), + std::forward_as_tuple(3)); + CHECK_EQ(it->first, 10); + CHECK_EQ(it->second, move_only_test(1)); + CHECK(!inserted); + } + + TEST_CASE("try emplace") { + dice::sparse_map::sparse_map map; + + auto [it, inserted] = map.try_emplace(10, 1); + CHECK_EQ(it->first, 10); + CHECK_EQ(it->second, move_only_test(1)); + CHECK(inserted); + + std::tie(it, inserted) = map.try_emplace(10, 3); + CHECK_EQ(it->first, 10); + CHECK_EQ(it->second, move_only_test(1)); + CHECK(!inserted); + } + + TEST_CASE("try emplace 2") { + // Insert x values with try_emplace, insert them again, check with find. 
+ dice::sparse_map::sparse_map map; + + const std::size_t nb_values = 1000; + for (std::size_t i = 0; i < nb_values; i++) { + auto [it, inserted] = map.try_emplace(utils::get_key(i), i); + + CHECK_EQ(it->first, utils::get_key(i)); + CHECK_EQ(it->second, move_only_test(i)); + CHECK(inserted); + } + CHECK_EQ(map.size(), nb_values); + + for (std::size_t i = 0; i < nb_values; i++) { + auto [it, inserted] = map.try_emplace(utils::get_key(i), i + 1); + + CHECK_EQ(it->first, utils::get_key(i)); + CHECK_EQ(it->second, move_only_test(i)); + CHECK(!inserted); + } + + for (std::size_t i = 0; i < nb_values; i++) { + auto it = map.find(utils::get_key(i)); + + CHECK_EQ(it->first, utils::get_key(i)); + CHECK_EQ(it->second, move_only_test(i)); + } + } + + TEST_CASE("emplace hint") { + dice::sparse_map::sparse_map map(0); + + // end() hint, new value + auto it = map.try_emplace(map.find(10), 10, 1); + CHECK_EQ(it->first, 10); + CHECK_EQ(it->second, move_only_test(1)); + + // Good hint + it = map.try_emplace(map.find(10), 10, 3); + CHECK_EQ(it->first, 10); + CHECK_EQ(it->second, move_only_test(1)); + + // Wrong hint, new value + it = map.try_emplace(map.find(10), 1, 3); + CHECK_EQ(it->first, 1); + CHECK_EQ(it->second, move_only_test(3)); + } + + TEST_CASE("insert or assign") { + dice::sparse_map::sparse_map map; + + auto [it, inserted] = map.insert_or_assign(10, move_only_test(1)); + CHECK_EQ(it->first, 10); + CHECK_EQ(it->second, move_only_test(1)); + CHECK(inserted); + + std::tie(it, inserted) = map.insert_or_assign(10, move_only_test(3)); + CHECK_EQ(it->first, 10); + CHECK_EQ(it->second, move_only_test(3)); + CHECK(!inserted); + } + + TEST_CASE("insert or assign hint") { + dice::sparse_map::sparse_map map(0); + + // end() hint, new value + auto it = map.insert_or_assign(map.find(10), 10, move_only_test(1)); + CHECK_EQ(it->first, 10); + CHECK_EQ(it->second, move_only_test(1)); + + // Good hint + it = map.insert_or_assign(map.find(10), 10, move_only_test(3)); + CHECK_EQ(it->first, 10); + CHECK_EQ(it->second, move_only_test(3)); + + // Bad hint, new value + it = map.insert_or_assign(map.find(10), 1, move_only_test(3)); + CHECK_EQ(it->first, 1); + CHECK_EQ(it->second, move_only_test(3)); + } + + /*TEST_CASE("range erase all") { + // insert x values, delete all + using HMap = dice::sparse_map::sparse_map; + + const std::size_t nb_values = 1000; + HMap map = utils::get_filled_hash_map(nb_values); + + auto it = map.erase(map.begin(), map.end()); + CHECK(it == map.end()); + CHECK(map.empty()); + }*/ + + /*TEST_CASE("range erase") { + // insert x values, delete all except 10 first and 780 last values + using HMap = dice::sparse_map::sparse_map; + + const std::size_t nb_values = 1000; + HMap map = utils::get_filled_hash_map(nb_values); + + auto it_first = std::next(map.begin(), 10); + auto it_last = std::next(map.begin(), 220); + + auto it = map.erase(it_first, it_last); + CHECK_EQ(std::distance(it, map.end()), 780); + CHECK_EQ(map.size(), 790); + CHECK_EQ(std::distance(map.begin(), map.end()), 790); + + for (auto &val : map) { + CHECK_EQ(map.count(val.first), 1); + } + }*/ + + TEST_CASE_TEMPLATE("erase loop", HMap, TEST_MAPS) { + // insert x values, delete all one by one with iterator + std::size_t nb_values = 1000; + + HMap map = utils::get_filled_hash_map(nb_values); + HMap map2 = utils::get_filled_hash_map(nb_values); + + auto it = map.begin(); + // Use second map to check for key after delete as we may not copy the key + // with move-only types. 
+ auto it2 = map2.begin(); + while (it != map.end()) { + it = map.erase(it); + --nb_values; + + CHECK_EQ(map.count(it2->first), 0); + CHECK_EQ(map.size(), nb_values); + ++it2; + } + + CHECK(map.empty()); + } + + /*TEST_CASE_TEMPLATE("erase loop range", HMap, TEST_MAPS) { + // insert x values, delete all five by five with iterators + const std::size_t hop = 5; + std::size_t nb_values = 1000; + + REQUIRE_EQ(nb_values % hop, 0); + + HMap map = utils::get_filled_hash_map(nb_values); + + auto it = map.begin(); + while (it != map.end()) { + it = map.erase(it, std::next(it, hop)); + nb_values -= hop; + + CHECK_EQ(map.size(), nb_values); + } + + CHECK(map.empty()); + }*/ + + TEST_CASE_TEMPLATE("insert erase insert", HMap, TEST_MAPS) { + // insert x/2 values, delete x/4 values, insert x/2 values, find each value + using key_t = typename HMap::key_type; + using value_t = typename HMap::mapped_type; + + const std::size_t nb_values = 2000; + HMap map(10); + + // Insert nb_values/2 + for (std::size_t i = 0; i < nb_values / 2; i++) { + auto [it, inserted] = map.insert({utils::get_key(i), utils::get_value(i)}); + + CHECK_EQ(it->first, utils::get_key(i)); + CHECK_EQ(it->second, utils::get_value(i)); + CHECK(inserted); + } + CHECK_EQ(map.size(), nb_values / 2); + + // Delete nb_values/4 + for (std::size_t i = 0; i < nb_values / 2; i++) { + if (i % 2 == 0) { + CHECK_EQ(map.erase(utils::get_key(i)), 1); + } + } + CHECK_EQ(map.size(), nb_values / 4); + + // Insert nb_values/2 + for (std::size_t i = nb_values / 2; i < nb_values; i++) { + auto [it, inserted] = map.insert({utils::get_key(i), utils::get_value(i)}); + + CHECK_EQ(it->first, utils::get_key(i)); + CHECK_EQ(it->second, utils::get_value(i)); + CHECK(inserted); + } + CHECK_EQ(map.size(), nb_values - nb_values / 4); + + // Find + for (std::size_t i = 0; i < nb_values; i++) { + if (i % 2 == 0 && i < nb_values / 2) { + auto it = map.find(utils::get_key(i)); + + CHECK(it == map.end()); + } else { + auto it = map.find(utils::get_key(i)); + + REQUIRE(it != map.end()); + CHECK_EQ(it->first, utils::get_key(i)); + CHECK_EQ(it->second, utils::get_value(i)); + } + } + } + + /*TEST_CASE("range erase same iter") { + // insert x values, test erase with same iterator as each parameter, check if + // returned mutable iterator is valid. + const std::size_t nb_values = 100; + auto map = + utils::get_filled_hash_map>( + nb_values); + + dice::sparse_map::sparse_map::const_iterator it_const = + map.cbegin(); + std::advance(it_const, 10); + + dice::sparse_map::sparse_map::iterator it_mutable = + map.erase(it_const, it_const); + CHECK(it_const == it_mutable); + //CHECK(map.mutable_iterator(it_const) == it_mutable); + CHECK_EQ(map.size(), 100); + + it_mutable->second = -100; + CHECK_EQ(it_const->second, -100); + }*/ + + /** + * rehash + */ + TEST_CASE("rehash empty") { + // test rehash(0), test find/erase/insert on map. 
+ const std::size_t nb_values = 100; + auto map = + utils::get_filled_hash_map>( + nb_values); + + const std::size_t bucket_count = map.bucket_count(); + CHECK(bucket_count >= nb_values); + + map.clear(); + CHECK_EQ(map.bucket_count(), bucket_count); + CHECK(map.empty()); + + map.rehash(0); + CHECK_EQ(map.bucket_count(), 0); + CHECK(map.empty()); + + CHECK(map.find(1) == map.end()); + CHECK_EQ(map.erase(1), 0); + CHECK(map.insert({1, 10}).second); + CHECK_EQ(map.at(1), 10); + } + + /** * operator== and operator!= */ -BOOST_AUTO_TEST_CASE_TEMPLATE(test_compare, HMap, test_types) { - const dice::sparse_map::sparse_map map1 = { - {"a", 1}, {"e", 5}, {"d", 4}, {"c", 3}, {"b", 2}}; - const dice::sparse_map::sparse_map map1_copy = { - {"e", 5}, {"c", 3}, {"b", 2}, {"a", 1}, {"d", 4}}; - const dice::sparse_map::sparse_map map2 = { - {"e", 5}, {"c", 3}, {"b", 2}, {"a", 1}, {"d", 4}, {"f", 6}}; - const dice::sparse_map::sparse_map map3 = { - {"e", 5}, {"c", 3}, {"b", 2}, {"a", 1}}; - const dice::sparse_map::sparse_map map4 = { - {"a", 1}, {"e", 5}, {"d", 4}, {"c", 3}, {"b", 26}}; - const dice::sparse_map::sparse_map map5 = { - {"a", 1}, {"e", 5}, {"d", 4}, {"c", 3}, {"z", 2}}; - - BOOST_CHECK(map1 == map1_copy); - BOOST_CHECK(map1_copy == map1); - - BOOST_CHECK(map1 != map2); - BOOST_CHECK(map2 != map1); - - BOOST_CHECK(map1 != map3); - BOOST_CHECK(map3 != map1); - - BOOST_CHECK(map1 != map4); - BOOST_CHECK(map4 != map1); - - BOOST_CHECK(map1 != map5); - BOOST_CHECK(map5 != map1); - - BOOST_CHECK(map2 != map3); - BOOST_CHECK(map3 != map2); - - BOOST_CHECK(map2 != map4); - BOOST_CHECK(map4 != map2); - - BOOST_CHECK(map2 != map5); - BOOST_CHECK(map5 != map2); - - BOOST_CHECK(map3 != map4); - BOOST_CHECK(map4 != map3); - - BOOST_CHECK(map3 != map5); - BOOST_CHECK(map5 != map3); - - BOOST_CHECK(map4 != map5); - BOOST_CHECK(map5 != map4); -} - -/** - * clear - */ -BOOST_AUTO_TEST_CASE(test_clear) { - // insert x values, clear map - using HMap = dice::sparse_map::sparse_map; - - const std::size_t nb_values = 1000; - auto map = utils::get_filled_hash_map(nb_values); - - map.clear(); - BOOST_CHECK_EQUAL(map.size(), 0); - BOOST_CHECK_EQUAL(std::distance(map.begin(), map.end()), 0); - - map.insert({5, -5}); - map.insert({{1, -1}, {2, -1}, {4, -4}, {3, -3}}); - - BOOST_CHECK(map == (HMap({{5, -5}, {1, -1}, {2, -1}, {4, -4}, {3, -3}}))); -} - -/** - * iterator.value() - */ -BOOST_AUTO_TEST_CASE(test_modify_value_through_iterator) { - // insert x values, modify value of even keys, check values - const std::size_t nb_values = 100; - auto map = - utils::get_filled_hash_map>( - nb_values); - - for (auto it = map.begin(); it != map.end(); it++) { - if (it.key() % 2 == 0) { - it.value() = -1; - } - } - - for (auto& val : map) { - if (val.first % 2 == 0) { - BOOST_CHECK_EQUAL(val.second, -1); - } else { - BOOST_CHECK_NE(val.second, -1); - } - } -} - -/** + TEST_CASE_TEMPLATE("compare", HMap, TEST_MAPS) { + const dice::sparse_map::sparse_map map1 = { + {"a", 1}, + {"e", 5}, + {"d", 4}, + {"c", 3}, + {"b", 2}}; + const dice::sparse_map::sparse_map map1_copy = { + {"e", 5}, + {"c", 3}, + {"b", 2}, + {"a", 1}, + {"d", 4}}; + const dice::sparse_map::sparse_map map2 = { + {"e", 5}, + {"c", 3}, + {"b", 2}, + {"a", 1}, + {"d", 4}, + {"f", 6}}; + const dice::sparse_map::sparse_map map3 = { + {"e", 5}, + {"c", 3}, + {"b", 2}, + {"a", 1}}; + const dice::sparse_map::sparse_map map4 = { + {"a", 1}, + {"e", 5}, + {"d", 4}, + {"c", 3}, + {"b", 26}}; + const dice::sparse_map::sparse_map map5 = { + {"a", 1}, + {"e", 5}, + {"d", 4}, + 
{"c", 3}, + {"z", 2}}; + + CHECK(map1 == map1_copy); + CHECK(map1_copy == map1); + + CHECK(map1 != map2); + CHECK(map2 != map1); + + CHECK(map1 != map3); + CHECK(map3 != map1); + + CHECK(map1 != map4); + CHECK(map4 != map1); + + CHECK(map1 != map5); + CHECK(map5 != map1); + + CHECK(map2 != map3); + CHECK(map3 != map2); + + CHECK(map2 != map4); + CHECK(map4 != map2); + + CHECK(map2 != map5); + CHECK(map5 != map2); + + CHECK(map3 != map4); + CHECK(map4 != map3); + + CHECK(map3 != map5); + CHECK(map5 != map3); + + CHECK(map4 != map5); + CHECK(map5 != map4); + } + + TEST_CASE("clear") { + // insert x values, clear map + using HMap = dice::sparse_map::sparse_map; + + const std::size_t nb_values = 1000; + auto map = utils::get_filled_hash_map(nb_values); + + map.clear(); + CHECK_EQ(map.size(), 0); + CHECK_EQ(std::distance(map.begin(), map.end()), 0); + + map.insert({5, -5}); + map.insert({{1, -1}, {2, -1}, {4, -4}, {3, -3}}); + + CHECK(map == (HMap({{5, -5}, {1, -1}, {2, -1}, {4, -4}, {3, -3}}))); + } + + TEST_CASE("modify value through iterator") { + // insert x values, modify value of even keys, check values + const std::size_t nb_values = 100; + auto map = + utils::get_filled_hash_map>( + nb_values); + + for (auto it = map.begin(); it != map.end(); it++) { + if (it->first % 2 == 0) { + it->second = -1; + } + } + + for (auto &val : map) { + if (val.first % 2 == 0) { + CHECK_EQ(val.second, -1); + } else { + CHECK_NE(val.second, -1); + } + } + } + + /** * constructor */ -BOOST_AUTO_TEST_CASE(test_extreme_bucket_count_value_construction) { - BOOST_CHECK_THROW( - (dice::sparse_map::sparse_map, std::equal_to, - std::allocator>, - dice::sparse_map::sh::power_of_two_growth_policy<2>>( - std::numeric_limits::max())), - std::length_error); - - BOOST_CHECK_THROW( - (dice::sparse_map::sparse_map, std::equal_to, - std::allocator>, - dice::sparse_map::sh::power_of_two_growth_policy<2>>( - std::numeric_limits::max() / 2 + 1)), - std::length_error); - - BOOST_CHECK_THROW( - (dice::sparse_map::sparse_map, std::equal_to, - std::allocator>, - dice::sparse_map::sh::prime_growth_policy>( - std::numeric_limits::max())), - std::length_error); - - BOOST_CHECK_THROW( - (dice::sparse_map::sparse_map, std::equal_to, - std::allocator>, - dice::sparse_map::sh::prime_growth_policy>( - std::numeric_limits::max() / 2)), - std::length_error); - - BOOST_CHECK_THROW( - (dice::sparse_map::sparse_map, std::equal_to, - std::allocator>, - dice::sparse_map::sh::mod_growth_policy<>>( - std::numeric_limits::max())), - std::length_error); -} - -BOOST_AUTO_TEST_CASE(test_range_construct) { - dice::sparse_map::sparse_map map = {{2, 1}, {1, 0}, {3, 2}}; - - dice::sparse_map::sparse_map map2(map.begin(), map.end()); - dice::sparse_map::sparse_map map3(map.cbegin(), map.cend()); -} - -/** - * operator=(std::initializer_list) - */ -BOOST_AUTO_TEST_CASE(test_assign_operator) { - dice::sparse_map::sparse_map map = {{0, 10}, {-2, 20}}; - BOOST_CHECK_EQUAL(map.size(), 2); - - map = {{1, 3}, {2, 4}}; - BOOST_CHECK_EQUAL(map.size(), 2); - BOOST_CHECK_EQUAL(map.at(1), 3); - BOOST_CHECK_EQUAL(map.at(2), 4); - BOOST_CHECK(map.find(0) == map.end()); - - map = {}; - BOOST_CHECK(map.empty()); -} - -/** - * move/copy constructor/operator - */ -BOOST_AUTO_TEST_CASE(test_move_constructor) { - // insert x values in map, move map into map_move with move constructor, check - // map and map_move, insert additional values in map_move, check map_move - using HMap = dice::sparse_map::sparse_map; - - const std::size_t nb_values = 100; - HMap map = 
utils::get_filled_hash_map(nb_values); - HMap map_move(std::move(map)); - - BOOST_CHECK(map_move == utils::get_filled_hash_map(nb_values)); - BOOST_CHECK(map == (HMap())); + TEST_CASE("extreme bucket count value construction") { + CHECK_THROWS( + (dice::sparse_map::sparse_map, std::equal_to, + std::allocator>, + dice::sparse_map::power_of_two_growth_policy<2>>( + std::numeric_limits::max()))); + + CHECK_THROWS( + (dice::sparse_map::sparse_map, std::equal_to, + std::allocator>, + dice::sparse_map::power_of_two_growth_policy<2>>( + std::numeric_limits::max() / 2 + 1))); + + CHECK_THROWS( + (dice::sparse_map::sparse_map, std::equal_to, + std::allocator>, + dice::sparse_map::prime_growth_policy>( + std::numeric_limits::max()))); + + CHECK_THROWS( + (dice::sparse_map::sparse_map, std::equal_to, + std::allocator>, + dice::sparse_map::prime_growth_policy>( + std::numeric_limits::max() / 2))); + + CHECK_THROWS( + (dice::sparse_map::sparse_map, std::equal_to, + std::allocator>, + dice::sparse_map::mod_growth_policy<>>( + std::numeric_limits::max()))); + } + + TEST_CASE("range construct") { + dice::sparse_map::sparse_map map = {{2, 1}, {1, 0}, {3, 2}}; + + dice::sparse_map::sparse_map map2(map.begin(), map.end()); + dice::sparse_map::sparse_map map3(map.cbegin(), map.cend()); + } + + /** + * operator=(std::initializer_list) + */ + TEST_CASE("assign op") { + dice::sparse_map::sparse_map map = {{0, 10}, {-2, 20}}; + CHECK_EQ(map.size(), 2); + + map = {{1, 3}, {2, 4}}; + CHECK_EQ(map.size(), 2); + CHECK_EQ(map.at(1), 3); + CHECK_EQ(map.at(2), 4); + CHECK(map.find(0) == map.end()); + + map = {}; + CHECK(map.empty()); + } + + /** + * move/copy constructor/operator + */ + TEST_CASE("move ctor") { + // insert x values in map, move map into map_move with move constructor, check + // map and map_move, insert additional values in map_move, check map_move + using HMap = dice::sparse_map::sparse_map; + + const std::size_t nb_values = 100; + HMap map = utils::get_filled_hash_map(nb_values); + HMap map_move(std::move(map)); + + CHECK(map_move == utils::get_filled_hash_map(nb_values)); + CHECK(map == (HMap())); + + for (std::size_t i = nb_values; i < nb_values * 2; i++) { + map_move.insert( + {utils::get_key(i), utils::get_value(i)}); + } + + CHECK_EQ(map_move.size(), nb_values * 2); + CHECK(map_move == utils::get_filled_hash_map(nb_values * 2)); + } + + TEST_CASE("move ctor empty") { + dice::sparse_map::sparse_map map(0); + dice::sparse_map::sparse_map map_move(std::move(map)); + + CHECK(map.empty()); + CHECK(map_move.empty()); + + CHECK(map.find("") == map.end()); + CHECK(map_move.find("") == map_move.end()); + } + + TEST_CASE("move op") { + // insert x values in map, move map into map_move, check map and map_move, + // insert additional values in map_move, check map_move + using HMap = dice::sparse_map::sparse_map; + + const std::size_t nb_values = 100; + HMap map = utils::get_filled_hash_map(nb_values); + HMap map_move = utils::get_filled_hash_map(1); + map_move = std::move(map); + + CHECK(map_move == utils::get_filled_hash_map(nb_values)); + CHECK(map == (HMap())); + + for (std::size_t i = nb_values; i < nb_values * 2; i++) { + map_move.insert( + {utils::get_key(i), utils::get_value(i)}); + } + + CHECK_EQ(map_move.size(), nb_values * 2); + CHECK(map_move == utils::get_filled_hash_map(nb_values * 2)); + } + + TEST_CASE("move op empty") { + dice::sparse_map::sparse_map map(0); + dice::sparse_map::sparse_map map_move; + map_move = (std::move(map)); + + CHECK(map.empty()); + CHECK(map_move.empty()); + + 
CHECK(map.find("") == map.end()); + CHECK(map_move.find("") == map_move.end()); + } + + TEST_CASE("reassign moved object move ctor") { + using HMap = dice::sparse_map::sparse_map; + + HMap map = {{"Key1", "Value1"}, {"Key2", "Value2"}, {"Key3", "Value3"}}; + HMap map_move(std::move(map)); + + CHECK_EQ(map_move.size(), 3); + CHECK_EQ(map.size(), 0); + + map = {{"Key4", "Value4"}, {"Key5", "Value5"}}; + CHECK(map == (HMap({{"Key4", "Value4"}, {"Key5", "Value5"}}))); + } + + TEST_CASE("reassign moved object move op") { + using HMap = dice::sparse_map::sparse_map; + + HMap map = {{"Key1", "Value1"}, {"Key2", "Value2"}, {"Key3", "Value3"}}; + HMap map_move = std::move(map); - for (std::size_t i = nb_values; i < nb_values * 2; i++) { - map_move.insert( - {utils::get_key(i), utils::get_value(i)}); - } + CHECK_EQ(map_move.size(), 3); + CHECK_EQ(map.size(), 0); + + map = {{"Key4", "Value4"}, {"Key5", "Value5"}}; + CHECK(map == (HMap({{"Key4", "Value4"}, {"Key5", "Value5"}}))); + } - BOOST_CHECK_EQUAL(map_move.size(), nb_values * 2); - BOOST_CHECK(map_move == utils::get_filled_hash_map(nb_values * 2)); -} - -BOOST_AUTO_TEST_CASE(test_move_constructor_empty) { - dice::sparse_map::sparse_map map(0); - dice::sparse_map::sparse_map map_move(std::move(map)); + TEST_CASE("use after move ctor") { + using HMap = dice::sparse_map::sparse_map; - BOOST_CHECK(map.empty()); - BOOST_CHECK(map_move.empty()); - - BOOST_CHECK(map.find("") == map.end()); - BOOST_CHECK(map_move.find("") == map_move.end()); -} + const std::size_t nb_values = 100; + HMap map = utils::get_filled_hash_map(nb_values); + HMap map_move(std::move(map)); -BOOST_AUTO_TEST_CASE(test_move_operator) { - // insert x values in map, move map into map_move, check map and map_move, - // insert additional values in map_move, check map_move - using HMap = dice::sparse_map::sparse_map; + CHECK(map == (HMap())); + CHECK_EQ(map.size(), 0); + CHECK_EQ(map.bucket_count(), 0); + CHECK_EQ(map.erase("a"), 0); + CHECK(map.find("a") == map.end()); - const std::size_t nb_values = 100; - HMap map = utils::get_filled_hash_map(nb_values); - HMap map_move = utils::get_filled_hash_map(1); - map_move = std::move(map); + for (std::size_t i = 0; i < nb_values; i++) { + map.insert( + {utils::get_key(i), utils::get_value(i)}); + } - BOOST_CHECK(map_move == utils::get_filled_hash_map(nb_values)); - BOOST_CHECK(map == (HMap())); + CHECK_EQ(map.size(), nb_values); + CHECK(map == map_move); + } - for (std::size_t i = nb_values; i < nb_values * 2; i++) { - map_move.insert( - {utils::get_key(i), utils::get_value(i)}); - } + TEST_CASE("use after move op") { + using HMap = dice::sparse_map::sparse_map; - BOOST_CHECK_EQUAL(map_move.size(), nb_values * 2); - BOOST_CHECK(map_move == utils::get_filled_hash_map(nb_values * 2)); -} + const std::size_t nb_values = 100; + HMap map = utils::get_filled_hash_map(nb_values); + HMap map_move(0); + map_move = std::move(map); -BOOST_AUTO_TEST_CASE(test_move_operator_empty) { - dice::sparse_map::sparse_map map(0); - dice::sparse_map::sparse_map map_move; - map_move = (std::move(map)); + CHECK(map == (HMap())); + CHECK_EQ(map.size(), 0); + CHECK_EQ(map.bucket_count(), 0); + CHECK_EQ(map.erase("a"), 0); + CHECK(map.find("a") == map.end()); - BOOST_CHECK(map.empty()); - BOOST_CHECK(map_move.empty()); + for (std::size_t i = 0; i < nb_values; i++) { + map.insert( + {utils::get_key(i), utils::get_value(i)}); + } - BOOST_CHECK(map.find("") == map.end()); - BOOST_CHECK(map_move.find("") == map_move.end()); -} + CHECK_EQ(map.size(), nb_values); + CHECK(map 
== map_move); + } -BOOST_AUTO_TEST_CASE(test_reassign_moved_object_move_constructor) { - using HMap = dice::sparse_map::sparse_map; + TEST_CASE("copy ctor and op") { + using HMap = dice::sparse_map::sparse_map>; - HMap map = {{"Key1", "Value1"}, {"Key2", "Value2"}, {"Key3", "Value3"}}; - HMap map_move(std::move(map)); + const std::size_t nb_values = 100; + HMap map = utils::get_filled_hash_map(nb_values); - BOOST_CHECK_EQUAL(map_move.size(), 3); - BOOST_CHECK_EQUAL(map.size(), 0); + HMap map_copy = map; + HMap map_copy2(map); + HMap map_copy3 = utils::get_filled_hash_map(1); + map_copy3 = map; + + CHECK(map == map_copy); + map.clear(); + + CHECK(map_copy == map_copy2); + CHECK(map_copy == map_copy3); + } + + TEST_CASE("copy ctor empty") { + dice::sparse_map::sparse_map map(0); + dice::sparse_map::sparse_map map_copy(map); - map = {{"Key4", "Value4"}, {"Key5", "Value5"}}; - BOOST_CHECK(map == (HMap({{"Key4", "Value4"}, {"Key5", "Value5"}}))); + CHECK(map.empty()); + CHECK(map_copy.empty()); + + CHECK(map.find("") == map.end()); + CHECK(map_copy.find("") == map_copy.end()); + } + + TEST_CASE("copy op empty") { + dice::sparse_map::sparse_map map(0); + dice::sparse_map::sparse_map map_copy(16); + map_copy = map; + + CHECK(map.empty()); + CHECK(map_copy.empty()); + + CHECK(map.find("") == map.end()); + CHECK(map_copy.find("") == map_copy.end()); + } + + TEST_CASE("at") { + // insert x values, use at for known and unknown values. + const dice::sparse_map::sparse_map map = {{0, 10}, {-2, 20}}; + + CHECK_EQ(map.at(0), 10); + CHECK_EQ(map.at(-2), 20); + + std::int64_t no_discard_dummy; + CHECK_THROWS_AS(no_discard_dummy = map.at(1), std::out_of_range); + (void) no_discard_dummy; + } + + TEST_CASE("contains") { + const dice::sparse_map::sparse_map map = {{0, 10}, {-2, 20}}; + + CHECK(map.contains(0)); + CHECK(map.contains(-2)); + CHECK(!map.contains(-3)); + } + + TEST_CASE("equal range") { + const dice::sparse_map::sparse_map map = {{0, 10}, {-2, 20}}; + + auto it_pair = map.equal_range(0); + REQUIRE_EQ(std::distance(it_pair.first, it_pair.second), 1); + CHECK_EQ(it_pair.first->second, 10); + + it_pair = map.equal_range(1); + CHECK(it_pair.first == it_pair.second); + CHECK(it_pair.first == map.end()); + } + + TEST_CASE("index op") { + // insert x values, use at for known and unknown values. 
+ dice::sparse_map::sparse_map map = {{0, 10}, {-2, 20}}; + + CHECK_EQ(map[0], 10); + CHECK_EQ(map[-2], 20); + CHECK_EQ(map[2], std::int64_t()); + + CHECK_EQ(map.size(), 3); + } + + /** + * swap + */ + TEST_CASE("swap") { + dice::sparse_map::sparse_map map = {{1, 10}, {8, 80}, {3, 30}}; + dice::sparse_map::sparse_map map2 = {{4, 40}, {5, 50}}; + + using std::swap; + swap(map, map2); + + CHECK(map == + (dice::sparse_map::sparse_map{{4, 40}, {5, 50}})); + CHECK(map2 == (dice::sparse_map::sparse_map{ + {1, 10}, + {8, 80}, + {3, 30}})); + + map.insert({6, 60}); + map2.insert({4, 40}); + + CHECK(map == (dice::sparse_map::sparse_map{ + {4, 40}, + {5, 50}, + {6, 60}})); + CHECK(map2 == (dice::sparse_map::sparse_map{ + {1, 10}, + {8, 80}, + {3, 30}, + {4, 40}})); + } + + TEST_CASE("swap empty") { + dice::sparse_map::sparse_map map = {{1, 10}, {8, 80}, {3, 30}}; + dice::sparse_map::sparse_map map2; + + using std::swap; + swap(map, map2); + + CHECK(map == (dice::sparse_map::sparse_map{})); + CHECK(map2 == (dice::sparse_map::sparse_map{ + {1, 10}, + {8, 80}, + {3, 30}})); + + map.insert({6, 60}); + map2.insert({4, 40}); + + CHECK(map == (dice::sparse_map::sparse_map{{6, 60}})); + CHECK(map2 == (dice::sparse_map::sparse_map{ + {1, 10}, + {8, 80}, + {3, 30}, + {4, 40}})); + } + + TEST_CASE("key equal") { + // Use a KeyEqual and Hash where any odd unsigned number 'x' is equal to + // 'x-1'. Make sure that KeyEqual is called (and not ==). + struct hash { + std::size_t operator()(std::uint64_t v) const { + if (v % 2u == 1u) { + return std::hash()(v - 1); + } else { + return std::hash()(v); + } + } + }; + + struct key_equal { + bool operator()(std::uint64_t lhs, std::uint64_t rhs) const { + if (lhs % 2u == 1u) { + lhs--; + } + + if (rhs % 2u == 1u) { + rhs--; + } + + return lhs == rhs; + } + }; + + dice::sparse_map::sparse_map map; + CHECK(map.insert({2, 10}).second); + CHECK_EQ(map.at(2), 10); + CHECK_EQ(map.at(3), 10); + CHECK(!map.insert({3, 10}).second); + + CHECK_EQ(map.size(), 1); + } + + /** + * other + */ + TEST_CASE("operations with all buckets marked as deleted or with a value") { + // Test find/erase/insert operations on a map which we craft to have all its + // buckets marked as deleted or containing a value to be sure that everything + // works well in this edge case. Intrusive test (it's tightly coupled with the + // implementation of the map). + struct identity_hash { + std::size_t operator()(unsigned int value) const { + return std::size_t(value); + } + }; + + dice::sparse_map::sparse_map, + std::allocator>, + dice::sparse_map::power_of_two_growth_policy<2>, + dice::sparse_map::exception_safety::basic, + dice::sparse_map::sparsity::medium, + std::ratio<8, 10>> map; + map.rehash(64); + + CHECK_EQ(map.bucket_count(), 64); + CHECK_EQ(map.max_load_factor, 0.8f); + + for (unsigned int i = 0; i < 51; i++) { + CHECK(map.insert({i, i}).second); + } + + for (unsigned int i = 0; i < 14; i++) { + CHECK_EQ(map.erase(i), 1); + } + + for (unsigned int i = 51; i < 64; i++) { + CHECK(map.insert({i, i}).second); + } + + CHECK_EQ(map.size(), 50); + CHECK_EQ(map.bucket_count(), 64); + + /** + * Map full of buckets marked as deleted or with a value. Check that find, + * erase and insert operations work well. + */ + + // Find inexistent values. + for (unsigned int i = 0; i < 14; i++) { + CHECK(map.find(i) == map.end()); + } + + // Erase inexistent values. 
+ for (unsigned int i = 0; i < 14; i++) { + CHECK_EQ(map.erase(i), 0); + } + CHECK_EQ(map.size(), 50); + CHECK_EQ(map.bucket_count(), 64); + + // Try to insert existing values. + for (unsigned int i = 14; i < 64; i++) { + CHECK(!map.insert({i, i}).second); + } + CHECK_EQ(map.size(), 50); + CHECK_EQ(map.bucket_count(), 64); + + // Insert new values + for (unsigned int i = 0; i < 14; i++) { + CHECK(map.insert({i, i}).second); + } + CHECK_EQ(map.size(), 64); + CHECK_EQ(map.bucket_count(), 128); + } + + TEST_CASE("heterogeneous lookup") { + struct hash_ptr { + std::size_t operator()(const std::unique_ptr &p) const { + return std::hash()( + reinterpret_cast(p.get())); + } + + std::size_t operator()(std::uintptr_t p) const { + return std::hash()(p); + } + + std::size_t operator()(const int *const &p) const { + return std::hash()(reinterpret_cast(p)); + } + }; + + struct equal_to_ptr { + using is_transparent = std::true_type; + + bool operator()(const std::unique_ptr &p1, + const std::unique_ptr &p2) const { + return p1 == p2; + } + + bool operator()(const std::unique_ptr &p1, std::uintptr_t p2) const { + return reinterpret_cast(p1.get()) == p2; + } + + bool operator()(std::uintptr_t p1, const std::unique_ptr &p2) const { + return p1 == reinterpret_cast(p2.get()); + } + + bool operator()(const std::unique_ptr &p1, + const int *const &p2) const { + return p1.get() == p2; + } + + bool operator()(const int *const &p1, + const std::unique_ptr &p2) const { + return p1 == p2.get(); + } + }; + + std::unique_ptr ptr1(new int(1)); + std::unique_ptr ptr2(new int(2)); + std::unique_ptr ptr3(new int(3)); + int other = -1; + + const std::uintptr_t addr1 = reinterpret_cast(ptr1.get()); + const int *const addr2 = ptr2.get(); + const int *const addr_unknown = &other; + + dice::sparse_map::sparse_map, int, hash_ptr, equal_to_ptr> map; + map.insert({std::move(ptr1), 4}); + map.insert({std::move(ptr2), 5}); + map.insert({std::move(ptr3), 6}); + + CHECK_EQ(map.size(), 3); + + CHECK_EQ(map.at(addr1), 4); + CHECK_EQ(map.at(addr2), 5); + + int no_discard_dummy; + CHECK_THROWS_AS(no_discard_dummy = map.at(addr_unknown), std::out_of_range); + (void) no_discard_dummy; + + REQUIRE(map.find(addr1) != map.end()); + CHECK_EQ(*map.find(addr1)->first, 1); + + REQUIRE(map.find(addr2) != map.end()); + CHECK_EQ(*map.find(addr2)->first, 2); + + CHECK(map.find(addr_unknown) == map.end()); + + CHECK_EQ(map.count(addr1), 1); + CHECK_EQ(map.count(addr2), 1); + CHECK_EQ(map.count(addr_unknown), 0); + + CHECK_EQ(map.erase(addr1), 1); + CHECK_EQ(map.erase(addr2), 1); + CHECK_EQ(map.erase(addr_unknown), 0); + + CHECK_EQ(map.size(), 1); + } + + /** + * Various operations on empty map + */ + TEST_CASE("empty map") { + dice::sparse_map::sparse_map map(0); + + CHECK_EQ(map.bucket_count(), 0); + CHECK_EQ(map.size(), 0); + CHECK_EQ(map.load_factor(), 0); + CHECK(map.empty()); + + CHECK(map.begin() == map.end()); + CHECK(map.begin() == map.cend()); + CHECK(map.cbegin() == map.cend()); + + CHECK(map.find("") == map.end()); + CHECK(map.find("test") == map.end()); + + CHECK_EQ(map.count(""), 0); + CHECK_EQ(map.count("test"), 0); + + CHECK(!map.contains("")); + CHECK(!map.contains("test")); + + int no_discard_dummy; + CHECK_THROWS_AS(no_discard_dummy = map.at(""), std::out_of_range); + CHECK_THROWS_AS(no_discard_dummy = map.at("test"), std::out_of_range); + (void) no_discard_dummy; + + auto range = map.equal_range("test"); + CHECK(range.first == range.second); + + CHECK_EQ(map.erase("test"), 0); + //CHECK(map.erase(map.begin(), map.end()) == 
+
+    CHECK_EQ(map["new value"], int{});
+  }
+
+  TEST_CASE("precalculated hash") {
+    dice::sparse_map::sparse_map> map = {
+        {1, -1},
+        {2, -2},
+        {3, -3},
+        {4, -4},
+        {5, -5},
+        {6, -6}};
+    const dice::sparse_map::sparse_map> map_const = map;
+
+    /**
+     * find
+     */
+    REQUIRE(map.find(3, map.hash_function()(3)) != map.end());
+    CHECK_EQ(map.find(3, map.hash_function()(3))->second, -3);
+
+    REQUIRE(map_const.find(3, map_const.hash_function()(3)) !=
+            map_const.end());
+    CHECK_EQ(map_const.find(3, map_const.hash_function()(3))->second,
+             -3);
+
+    /**
+     * at
+     */
+    CHECK_EQ(map.at(3, map.hash_function()(3)), -3);
+    CHECK_EQ(map_const.at(3, map_const.hash_function()(3)), -3);
+
+    /**
+     * contains
+     */
+    CHECK(map.contains(3, map.hash_function()(3)));
+    CHECK(map_const.contains(3, map_const.hash_function()(3)));
+
+    /**
+     * count
+     */
+    CHECK_EQ(map.count(3, map.hash_function()(3)), 1);
+    CHECK_EQ(map_const.count(3, map_const.hash_function()(3)), 1);
+
+    /**
+     * equal_range
+     */
+    auto it_range = map.equal_range(3, map.hash_function()(3));
+    REQUIRE_EQ(std::distance(it_range.first, it_range.second), 1);
+    CHECK_EQ(it_range.first->second, -3);
+
+    auto it_range_const = map_const.equal_range(3, map_const.hash_function()(3));
+    REQUIRE_EQ(
+        std::distance(it_range_const.first, it_range_const.second), 1);
+    CHECK_EQ(it_range_const.first->second, -3);
+
+    /**
+     * erase
+     */
+    CHECK_EQ(map.erase(3, map.hash_function()(3)), 1);
+  }
+
+  TEST_CASE("insert iterate then remove 10M ints") {
+    dice::sparse_map::sparse_map m;
+    std::default_random_engine rng{std::random_device{}()};
+
+    for (size_t ix = 0; ix < 10'000'000; ++ix) {
+      (void) m[static_cast(rng())];
+    }
+
+    std::cout << "map size: " << m.size() << std::endl;
+
+    size_t sum = 0;
+    for (auto [x, _] : m) {
+      sum += x;
+    }
+    std::cout << sum << std::endl;
+
+    for (auto it = m.begin(); it != m.end(); ) {
+      it = m.erase(it);
+    }
+  }
 }
-
-BOOST_AUTO_TEST_CASE(test_reassign_moved_object_move_operator) {
-  using HMap = dice::sparse_map::sparse_map<std::string, std::string>;
-
-  HMap map = {{"Key1", "Value1"}, {"Key2", "Value2"}, {"Key3", "Value3"}};
-  HMap map_move = std::move(map);
-
-  BOOST_CHECK_EQUAL(map_move.size(), 3);
-  BOOST_CHECK_EQUAL(map.size(), 0);
-
-  map = {{"Key4", "Value4"}, {"Key5", "Value5"}};
-  BOOST_CHECK(map == (HMap({{"Key4", "Value4"}, {"Key5", "Value5"}})));
-}
-
-BOOST_AUTO_TEST_CASE(test_use_after_move_constructor) {
-  using HMap = dice::sparse_map::sparse_map;
-
-  const std::size_t nb_values = 100;
-  HMap map = utils::get_filled_hash_map(nb_values);
-  HMap map_move(std::move(map));
-
-  BOOST_CHECK(map == (HMap()));
-  BOOST_CHECK_EQUAL(map.size(), 0);
-  BOOST_CHECK_EQUAL(map.bucket_count(), 0);
-  BOOST_CHECK_EQUAL(map.erase("a"), 0);
-  BOOST_CHECK(map.find("a") == map.end());
-
-  for (std::size_t i = 0; i < nb_values; i++) {
-    map.insert(
-        {utils::get_key(i), utils::get_value(i)});
-  }
-
-  BOOST_CHECK_EQUAL(map.size(), nb_values);
-  BOOST_CHECK(map == map_move);
-}
-
-BOOST_AUTO_TEST_CASE(test_use_after_move_operator) {
-  using HMap = dice::sparse_map::sparse_map;
-
-  const std::size_t nb_values = 100;
-  HMap map = utils::get_filled_hash_map(nb_values);
-  HMap map_move(0);
-  map_move = std::move(map);
-
-  BOOST_CHECK(map == (HMap()));
-  BOOST_CHECK_EQUAL(map.size(), 0);
-  BOOST_CHECK_EQUAL(map.bucket_count(), 0);
-  BOOST_CHECK_EQUAL(map.erase("a"), 0);
-  BOOST_CHECK(map.find("a") == map.end());
-
-  for (std::size_t i = 0; i < nb_values; i++) {
-    map.insert(
-        {utils::get_key(i), utils::get_value(i)});
-  }
-
-  BOOST_CHECK_EQUAL(map.size(), nb_values);
-  BOOST_CHECK(map == map_move);
-}
-
-BOOST_AUTO_TEST_CASE(test_copy_constructor_and_operator) {
-  using HMap = dice::sparse_map::sparse_map>;
-
-  const std::size_t nb_values = 100;
-  HMap map = utils::get_filled_hash_map(nb_values);
-
-  HMap map_copy = map;
-  HMap map_copy2(map);
-  HMap map_copy3 = utils::get_filled_hash_map(1);
-  map_copy3 = map;
-
-  BOOST_CHECK(map == map_copy);
-  map.clear();
-
-  BOOST_CHECK(map_copy == map_copy2);
-  BOOST_CHECK(map_copy == map_copy3);
-}
-
-BOOST_AUTO_TEST_CASE(test_copy_constructor_empty) {
-  dice::sparse_map::sparse_map map(0);
-  dice::sparse_map::sparse_map map_copy(map);
-
-  BOOST_CHECK(map.empty());
-  BOOST_CHECK(map_copy.empty());
-
-  BOOST_CHECK(map.find("") == map.end());
-  BOOST_CHECK(map_copy.find("") == map_copy.end());
-}
-
-BOOST_AUTO_TEST_CASE(test_copy_operator_empty) {
-  dice::sparse_map::sparse_map map(0);
-  dice::sparse_map::sparse_map map_copy(16);
-  map_copy = map;
-
-  BOOST_CHECK(map.empty());
-  BOOST_CHECK(map_copy.empty());
-
-  BOOST_CHECK(map.find("") == map.end());
-  BOOST_CHECK(map_copy.find("") == map_copy.end());
-}
-
-/**
- * at
- */
-BOOST_AUTO_TEST_CASE(test_at) {
-  // insert x values, use at for known and unknown values.
-  const dice::sparse_map::sparse_map<std::int64_t, std::int64_t> map = {{0, 10}, {-2, 20}};
-
-  BOOST_CHECK_EQUAL(map.at(0), 10);
-  BOOST_CHECK_EQUAL(map.at(-2), 20);
-  BOOST_CHECK_THROW(map.at(1), std::out_of_range);
-}
-
-/**
- * contains
- */
-BOOST_AUTO_TEST_CASE(test_contains) {
-  const dice::sparse_map::sparse_map<std::int64_t, std::int64_t> map = {{0, 10}, {-2, 20}};
-
-  BOOST_CHECK(map.contains(0));
-  BOOST_CHECK(map.contains(-2));
-  BOOST_CHECK(!map.contains(-3));
-}
-
-/**
- * equal_range
- */
-BOOST_AUTO_TEST_CASE(test_equal_range) {
-  const dice::sparse_map::sparse_map<std::int64_t, std::int64_t> map = {{0, 10}, {-2, 20}};
-
-  auto it_pair = map.equal_range(0);
-  BOOST_REQUIRE_EQUAL(std::distance(it_pair.first, it_pair.second), 1);
-  BOOST_CHECK_EQUAL(it_pair.first->second, 10);
-
-  it_pair = map.equal_range(1);
-  BOOST_CHECK(it_pair.first == it_pair.second);
-  BOOST_CHECK(it_pair.first == map.end());
-}
-
-/**
- * operator[]
- */
-BOOST_AUTO_TEST_CASE(test_access_operator) {
-  // insert x values, use at for known and unknown values.
-  dice::sparse_map::sparse_map<std::int64_t, std::int64_t> map = {{0, 10}, {-2, 20}};
-
-  BOOST_CHECK_EQUAL(map[0], 10);
-  BOOST_CHECK_EQUAL(map[-2], 20);
-  BOOST_CHECK_EQUAL(map[2], std::int64_t());
-
-  BOOST_CHECK_EQUAL(map.size(), 3);
-}
-
-/**
- * swap
- */
-BOOST_AUTO_TEST_CASE(test_swap) {
-  dice::sparse_map::sparse_map<std::int64_t, std::int64_t> map = {{1, 10}, {8, 80}, {3, 30}};
-  dice::sparse_map::sparse_map<std::int64_t, std::int64_t> map2 = {{4, 40}, {5, 50}};
-
-  using std::swap;
-  swap(map, map2);
-
-  BOOST_CHECK(map ==
-              (dice::sparse_map::sparse_map<std::int64_t, std::int64_t>{{4, 40}, {5, 50}}));
-  BOOST_CHECK(map2 == (dice::sparse_map::sparse_map<std::int64_t, std::int64_t>{
-                          {1, 10}, {8, 80}, {3, 30}}));
-
-  map.insert({6, 60});
-  map2.insert({4, 40});
-
-  BOOST_CHECK(map == (dice::sparse_map::sparse_map<std::int64_t, std::int64_t>{
-                         {4, 40}, {5, 50}, {6, 60}}));
-  BOOST_CHECK(map2 == (dice::sparse_map::sparse_map<std::int64_t, std::int64_t>{
-                          {1, 10}, {8, 80}, {3, 30}, {4, 40}}));
-}
-
-BOOST_AUTO_TEST_CASE(test_swap_empty) {
-  dice::sparse_map::sparse_map<std::int64_t, std::int64_t> map = {{1, 10}, {8, 80}, {3, 30}};
-  dice::sparse_map::sparse_map<std::int64_t, std::int64_t> map2;
-
-  using std::swap;
-  swap(map, map2);
-
-  BOOST_CHECK(map == (dice::sparse_map::sparse_map<std::int64_t, std::int64_t>{}));
-  BOOST_CHECK(map2 == (dice::sparse_map::sparse_map<std::int64_t, std::int64_t>{
-                          {1, 10}, {8, 80}, {3, 30}}));
-
-  map.insert({6, 60});
-  map2.insert({4, 40});
-
-  BOOST_CHECK(map == (dice::sparse_map::sparse_map<std::int64_t, std::int64_t>{{6, 60}}));
-  BOOST_CHECK(map2 == (dice::sparse_map::sparse_map<std::int64_t, std::int64_t>{
-                          {1, 10}, {8, 80}, {3, 30}, {4, 40}}));
-}
-
-/**
- * serialize and deserialize
- */
-BOOST_AUTO_TEST_CASE(test_serialize_deserialize_empty) {
-  // serialize empty map; deserialize in new map; check equal.
-  // for deserialization, test it with and without hash compatibility.
-  const dice::sparse_map::sparse_map empty_map(0);
-
-  serializer serial;
-  empty_map.serialize(serial);
-
-  deserializer dserial(serial.str());
-  auto empty_map_deserialized = decltype(empty_map)::deserialize(dserial, true);
-  BOOST_CHECK(empty_map_deserialized == empty_map);
-
-  deserializer dserial2(serial.str());
-  empty_map_deserialized = decltype(empty_map)::deserialize(dserial2, false);
-  BOOST_CHECK(empty_map_deserialized == empty_map);
-}
-
-BOOST_AUTO_TEST_CASE(test_serialize_deserialize_few) {
-  // insert x values that fits into one sparse bucket; delete some values;
-  // serialize map; deserialize in new map; check equal. for deserialization,
-  // test it with and without hash compatibility.
-  const dice::sparse_map::sparse_map map{
-      {10, 100}, {4, 14}, {9, 201}};
-
-  serializer serial;
-  map.serialize(serial);
-
-  deserializer dserial(serial.str());
-  auto map_deserialized = decltype(map)::deserialize(dserial, true);
-  BOOST_CHECK(map_deserialized == map);
-
-  deserializer dserial2(serial.str());
-  map_deserialized = decltype(map)::deserialize(dserial2, false);
-  BOOST_CHECK(map_deserialized == map);
-}
-
-BOOST_AUTO_TEST_CASE(test_serialize_deserialize) {
-  // insert x values; delete some values; serialize map; deserialize in new map;
-  // check equal. for deserialization, test it with and without hash
-  // compatibility.
-  const std::size_t nb_values = 1000;
-
-  dice::sparse_map::sparse_map map;
-  for (std::size_t i = 0; i < nb_values + 40; i++) {
-    map.insert(
-        {utils::get_key(i), utils::get_value(i)});
-  }
-
-  for (std::size_t i = nb_values; i < nb_values + 40; i++) {
-    map.erase(utils::get_key(i));
-  }
-  BOOST_CHECK_EQUAL(map.size(), nb_values);
-
-  serializer serial;
-  map.serialize(serial);
-
-  deserializer dserial(serial.str());
-  auto map_deserialized = decltype(map)::deserialize(dserial, true);
-  BOOST_CHECK(map == map_deserialized);
-
-  deserializer dserial2(serial.str());
-  map_deserialized = decltype(map)::deserialize(dserial2, false);
-  BOOST_CHECK(map_deserialized == map);
-}
-
-BOOST_AUTO_TEST_CASE(test_serialize_deserialize_with_different_hash) {
-  // insert x values; serialize map; deserialize in new map which has a
-  // different hash; check equal
-  struct hash_str_diff {
-    std::size_t operator()(const std::string& str) const {
-      return std::hash<std::string>()(str) + 123;
-    }
-  };
-
-  const std::size_t nb_values = 1000;
-
-  dice::sparse_map::sparse_map map;
-  for (std::size_t i = 0; i < nb_values; i++) {
-    map.insert(
-        {utils::get_key(i), utils::get_value(i)});
-  }
-  BOOST_CHECK_EQUAL(map.size(), nb_values);
-
-  serializer serial;
-  map.serialize(serial);
-
-  deserializer dserial(serial.str());
-  auto map_deserialized =
-      dice::sparse_map::sparse_map::deserialize(
-          dserial, false);
-
-  BOOST_CHECK_EQUAL(map_deserialized.size(), map.size());
-  for (const auto& val : map) {
-    BOOST_CHECK(map_deserialized.find(val.first) != map_deserialized.end());
-  }
-}
-
-/**
- * KeyEqual
- */
-BOOST_AUTO_TEST_CASE(test_key_equal) {
-  // Use a KeyEqual and Hash where any odd unsigned number 'x' is equal to
-  // 'x-1'. Make sure that KeyEqual is called (and not ==).
-  struct hash {
-    std::size_t operator()(std::uint64_t v) const {
-      if (v % 2u == 1u) {
-        return std::hash<std::uint64_t>()(v - 1);
-      } else {
-        return std::hash<std::uint64_t>()(v);
-      }
-    }
-  };
-
-  struct key_equal {
-    bool operator()(std::uint64_t lhs, std::uint64_t rhs) const {
-      if (lhs % 2u == 1u) {
-        lhs--;
-      }
-
-      if (rhs % 2u == 1u) {
-        rhs--;
-      }
-
-      return lhs == rhs;
-    }
-  };
-
-  dice::sparse_map::sparse_map<std::uint64_t, unsigned int, hash, key_equal> map;
-  BOOST_CHECK(map.insert({2, 10}).second);
-  BOOST_CHECK_EQUAL(map.at(2), 10);
-  BOOST_CHECK_EQUAL(map.at(3), 10);
-  BOOST_CHECK(!map.insert({3, 10}).second);
-
-  BOOST_CHECK_EQUAL(map.size(), 1);
-}
-
-/**
- * other
- */
-BOOST_AUTO_TEST_CASE(
-    test_operations_with_all_buckets_marked_as_deleted_or_with_a_value) {
-  // Test find/erase/insert operations on a map which we craft to have all its
-  // buckets marked as deleted or containing a value to be sure that everything
-  // works well in this edge case. Intrusive test (it's tightly coupled with the
-  // implementation of the map).
-  struct identity_hash {
-    std::size_t operator()(unsigned int value) const {
-      return std::size_t(value);
-    }
-  };
-
-  dice::sparse_map::sparse_map<unsigned int, unsigned int, identity_hash> map;
-  map.max_load_factor(0.8f);
-  map.rehash(64);
-
-  BOOST_CHECK_EQUAL(map.bucket_count(), 64);
-  BOOST_CHECK_EQUAL(map.max_load_factor(), 0.8f);
-
-  for (unsigned int i = 0; i < 51; i++) {
-    BOOST_CHECK(map.insert({i, i}).second);
-  }
-
-  for (unsigned int i = 0; i < 14; i++) {
-    BOOST_CHECK_EQUAL(map.erase(i), 1);
-  }
-
-  for (unsigned int i = 51; i < 64; i++) {
-    BOOST_CHECK(map.insert({i, i}).second);
-  }
-
-  BOOST_CHECK_EQUAL(map.size(), 50);
-  BOOST_CHECK_EQUAL(map.bucket_count(), 64);
-
-  /**
-   * Map full of buckets marked as deleted or with a value. Check that find,
-   * erase and insert operations work well.
-   */
-
-  // Find inexistent values.
-  for (unsigned int i = 0; i < 14; i++) {
-    BOOST_CHECK(map.find(i) == map.end());
-  }
-
-  // Erase inexistent values.
-  for (unsigned int i = 0; i < 14; i++) {
-    BOOST_CHECK_EQUAL(map.erase(i), 0);
-  }
-  BOOST_CHECK_EQUAL(map.size(), 50);
-  BOOST_CHECK_EQUAL(map.bucket_count(), 64);
-
-  // Try to insert existing values.
-  for (unsigned int i = 14; i < 64; i++) {
-    BOOST_CHECK(!map.insert({i, i}).second);
-  }
-  BOOST_CHECK_EQUAL(map.size(), 50);
-  BOOST_CHECK_EQUAL(map.bucket_count(), 64);
-
-  // Insert new values
-  for (unsigned int i = 0; i < 14; i++) {
-    BOOST_CHECK(map.insert({i, i}).second);
-  }
-  BOOST_CHECK_EQUAL(map.size(), 64);
-  BOOST_CHECK_EQUAL(map.bucket_count(), 128);
-}
-
-BOOST_AUTO_TEST_CASE(test_heterogeneous_lookups) {
-  struct hash_ptr {
-    std::size_t operator()(const std::unique_ptr<int>& p) const {
-      return std::hash<std::uintptr_t>()(
-          reinterpret_cast<std::uintptr_t>(p.get()));
-    }
-
-    std::size_t operator()(std::uintptr_t p) const {
-      return std::hash<std::uintptr_t>()(p);
-    }
-
-    std::size_t operator()(const int* const& p) const {
-      return std::hash<std::uintptr_t>()(reinterpret_cast<std::uintptr_t>(p));
-    }
-  };
-
-  struct equal_to_ptr {
-    using is_transparent = std::true_type;
-
-    bool operator()(const std::unique_ptr<int>& p1,
-                    const std::unique_ptr<int>& p2) const {
-      return p1 == p2;
-    }
-
-    bool operator()(const std::unique_ptr<int>& p1, std::uintptr_t p2) const {
-      return reinterpret_cast<std::uintptr_t>(p1.get()) == p2;
-    }
-
-    bool operator()(std::uintptr_t p1, const std::unique_ptr<int>& p2) const {
-      return p1 == reinterpret_cast<std::uintptr_t>(p2.get());
-    }
-
-    bool operator()(const std::unique_ptr<int>& p1,
-                    const int* const& p2) const {
-      return p1.get() == p2;
-    }
-
-    bool operator()(const int* const& p1,
-                    const std::unique_ptr<int>& p2) const {
-      return p1 == p2.get();
-    }
-  };
-
-  std::unique_ptr<int> ptr1(new int(1));
-  std::unique_ptr<int> ptr2(new int(2));
-  std::unique_ptr<int> ptr3(new int(3));
-  int other = -1;
-
-  const std::uintptr_t addr1 = reinterpret_cast<std::uintptr_t>(ptr1.get());
-  const int* const addr2 = ptr2.get();
-  const int* const addr_unknown = &other;
-
-  dice::sparse_map::sparse_map<std::unique_ptr<int>, int, hash_ptr, equal_to_ptr> map;
-  map.insert({std::move(ptr1), 4});
-  map.insert({std::move(ptr2), 5});
-  map.insert({std::move(ptr3), 6});
-
-  BOOST_CHECK_EQUAL(map.size(), 3);
-
-  BOOST_CHECK_EQUAL(map.at(addr1), 4);
-  BOOST_CHECK_EQUAL(map.at(addr2), 5);
-  BOOST_CHECK_THROW(map.at(addr_unknown), std::out_of_range);
-
-  BOOST_REQUIRE(map.find(addr1) != map.end());
-  BOOST_CHECK_EQUAL(*map.find(addr1)->first, 1);
-
-  BOOST_REQUIRE(map.find(addr2) != map.end());
-  BOOST_CHECK_EQUAL(*map.find(addr2)->first, 2);
-
-  BOOST_CHECK(map.find(addr_unknown) == map.end());
-
-  BOOST_CHECK_EQUAL(map.count(addr1), 1);
-  BOOST_CHECK_EQUAL(map.count(addr2), 1);
-  BOOST_CHECK_EQUAL(map.count(addr_unknown), 0);
-
-  BOOST_CHECK_EQUAL(map.erase(addr1), 1);
-  BOOST_CHECK_EQUAL(map.erase(addr2), 1);
-  BOOST_CHECK_EQUAL(map.erase(addr_unknown), 0);
-
-  BOOST_CHECK_EQUAL(map.size(), 1);
-}
-
-/**
- * Various operations on empty map
- */
-BOOST_AUTO_TEST_CASE(test_empty_map) {
-  dice::sparse_map::sparse_map<std::string, int> map(0);
-
-  BOOST_CHECK_EQUAL(map.bucket_count(), 0);
-  BOOST_CHECK_EQUAL(map.size(), 0);
-  BOOST_CHECK_EQUAL(map.load_factor(), 0);
-  BOOST_CHECK(map.empty());
-
-  BOOST_CHECK(map.begin() == map.end());
-  BOOST_CHECK(map.begin() == map.cend());
-  BOOST_CHECK(map.cbegin() == map.cend());
-
-  BOOST_CHECK(map.find("") == map.end());
-  BOOST_CHECK(map.find("test") == map.end());
-
-  BOOST_CHECK_EQUAL(map.count(""), 0);
-  BOOST_CHECK_EQUAL(map.count("test"), 0);
-
-  BOOST_CHECK(!map.contains(""));
-  BOOST_CHECK(!map.contains("test"));
-
-  BOOST_CHECK_THROW(map.at(""), std::out_of_range);
-  BOOST_CHECK_THROW(map.at("test"), std::out_of_range);
-
-  auto range = map.equal_range("test");
-  BOOST_CHECK(range.first == range.second);
-
-  BOOST_CHECK_EQUAL(map.erase("test"), 0);
-  BOOST_CHECK(map.erase(map.begin(), map.end()) == map.end());
-
-  BOOST_CHECK_EQUAL(map["new value"], int{});
-}
-
-/**
- * Test precalculated hash
- */
-BOOST_AUTO_TEST_CASE(test_precalculated_hash) {
-  dice::sparse_map::sparse_map> map = {
-      {1, -1}, {2, -2}, {3, -3}, {4, -4}, {5, -5}, {6, -6}};
-  const dice::sparse_map::sparse_map> map_const = map;
-
-  /**
-   * find
-   */
-  BOOST_REQUIRE(map.find(3, map.hash_function()(3)) != map.end());
-  BOOST_CHECK_EQUAL(map.find(3, map.hash_function()(3))->second, -3);
-
-  BOOST_REQUIRE(map_const.find(3, map_const.hash_function()(3)) !=
-                map_const.end());
-  BOOST_CHECK_EQUAL(map_const.find(3, map_const.hash_function()(3))->second,
-                    -3);
-
-  /**
-   * at
-   */
-  BOOST_CHECK_EQUAL(map.at(3, map.hash_function()(3)), -3);
-  BOOST_CHECK_EQUAL(map_const.at(3, map_const.hash_function()(3)), -3);
-
-  /**
-   * contains
-   */
-  BOOST_CHECK(map.contains(3, map.hash_function()(3)));
-  BOOST_CHECK(map_const.contains(3, map_const.hash_function()(3)));
-
-  /**
-   * count
-   */
-  BOOST_CHECK_EQUAL(map.count(3, map.hash_function()(3)), 1);
-  BOOST_CHECK_EQUAL(map_const.count(3, map_const.hash_function()(3)), 1);
-
-  /**
-   * equal_range
-   */
-  auto it_range = map.equal_range(3, map.hash_function()(3));
-  BOOST_REQUIRE_EQUAL(std::distance(it_range.first, it_range.second), 1);
-  BOOST_CHECK_EQUAL(it_range.first->second, -3);
-
-  auto it_range_const = map_const.equal_range(3, map_const.hash_function()(3));
-  BOOST_REQUIRE_EQUAL(
-      std::distance(it_range_const.first, it_range_const.second), 1);
-  BOOST_CHECK_EQUAL(it_range_const.first->second, -3);
-
-  /**
-   * erase
-   */
-  BOOST_CHECK_EQUAL(map.erase(3, map.hash_function()(3)), 1);
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/tests/sparse_set_tests.cpp b/tests/sparse_set_tests.cpp
index e78cb43..5a51c9a 100644
--- a/tests/sparse_set_tests.cpp
+++ b/tests/sparse_set_tests.cpp
@@ -21,168 +21,106 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
 */
-#include
+#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
+#include
+
+#include
-#include
-#include
-#include
-#include
 #include
-#include
 #include
 #include
-#include
 #include "utils.h"
-BOOST_AUTO_TEST_SUITE(test_sparse_set)
-
-using test_types =
-    boost::mpl::list,
-                    dice::sparse_map::sparse_set,
-                    dice::sparse_map::sparse_set,
-                    dice::sparse_map::sparse_set,
-                    dice::sparse_map::sparse_pg_set,
-                    dice::sparse_map::sparse_set,
-                                                 std::equal_to,
-                                                 std::allocator,
-                                                 dice::sparse_map::sh::prime_growth_policy>,
-                    dice::sparse_map::sparse_set,
-                                                 std::equal_to,
-                                                 std::allocator,
-                                                 dice::sparse_map::sh::mod_growth_policy<>>,
-                    dice::sparse_map::sparse_set,
-                                                 std::equal_to,
-                                                 std::allocator,
-                                                 dice::sparse_map::sh::mod_growth_policy<>>>;
-
-BOOST_AUTO_TEST_CASE_TEMPLATE(test_insert, HSet, test_types) {
-  // insert x values, insert them again, check values
-  using key_t = typename HSet::key_type;
-
-  const std::size_t nb_values = 1000;
-  HSet set;
-  typename HSet::iterator it;
-  bool inserted;
-
-  for (std::size_t i = 0; i < nb_values; i++) {
-    std::tie(it, inserted) = set.insert(utils::get_key<key_t>(i));
-
-    BOOST_CHECK_EQUAL(*it, utils::get_key<key_t>(i));
-    BOOST_CHECK(inserted);
-  }
-  BOOST_CHECK_EQUAL(set.size(), nb_values);
-
-  for (std::size_t i = 0; i < nb_values; i++) {
-    std::tie(it, inserted) = set.insert(utils::get_key<key_t>(i));
-
-    BOOST_CHECK_EQUAL(*it, utils::get_key<key_t>(i));
-    BOOST_CHECK(!inserted);
-  }
-
-  for (std::size_t i = 0; i < nb_values; i++) {
-    it = set.find(utils::get_key<key_t>(i));
-
-    BOOST_CHECK_EQUAL(*it, utils::get_key<key_t>(i));
-  }
+#define TEST_SETS dice::sparse_map::sparse_set, \
+                  dice::sparse_map::sparse_set, \
+                  dice::sparse_map::sparse_set, \
+                  dice::sparse_map::sparse_set, \
+                  dice::sparse_map::sparse_pg_set, \
+                  dice::sparse_map::sparse_set, \
+                                               std::equal_to, \
+                                               std::allocator, \
+                                               dice::sparse_map::prime_growth_policy>, \
+                  dice::sparse_map::sparse_set, \
+                                               std::equal_to, \
+                                               std::allocator, \
+                                               dice::sparse_map::mod_growth_policy<>>, \
+                  dice::sparse_map::sparse_set < move_only_test, std::hash, \
+                                               std::equal_to, \
+                                               std::allocator, \
+                                               dice::sparse_map::mod_growth_policy<>>
+TEST_SUITE("sparse set") {
+  TEST_CASE_TEMPLATE("insert", HSet, TEST_SETS) {
+    // insert x values, insert them again, check values
+    using key_t = typename HSet::key_type;
+
+    const std::size_t nb_values = 1000;
+    HSet set;
+
+    for (std::size_t i = 0; i < nb_values; i++) {
+      auto k = utils::get_key<key_t>(i);
+      auto [it, inserted] = set.insert(std::move(k));
+
+      CHECK_EQ(*it, utils::get_key<key_t>(i));
+      CHECK(inserted);
+    }
+    CHECK_EQ(set.size(), nb_values);
+
+    for (std::size_t i = 0; i < nb_values; i++) {
+      auto [it, inserted] = set.insert(utils::get_key<key_t>(i));
+
+      CHECK_EQ(*it, utils::get_key<key_t>(i));
+      CHECK(!inserted);
+    }
+
+    for (std::size_t i = 0; i < nb_values; i++) {
+      auto it = set.find(utils::get_key<key_t>(i));
+
+      CHECK_EQ(*it, utils::get_key<key_t>(i));
+    }
+  }
+
+  TEST_CASE("compare") {
+    const dice::sparse_map::sparse_set set1 = {"a", "e", "d", "c", "b"};
+    const dice::sparse_map::sparse_set set1_copy = {"e", "c", "b", "a", "d"};
+    const dice::sparse_map::sparse_set set2 = {"e", "c", "b", "a", "d", "f"};
+    const dice::sparse_map::sparse_set set3 = {"e", "c", "b", "a"};
+    const dice::sparse_map::sparse_set set4 = {"a", "e", "d", "c", "z"};
+
+    CHECK(set1 == set1_copy);
+    CHECK(set1_copy == set1);
+
+    CHECK(set1 != set2);
+    CHECK(set2 != set1);
+
+    CHECK(set1 != set3);
+    CHECK(set3 != set1);
+
+    CHECK(set1 != set4);
+    CHECK(set4 != set1);
+
+    CHECK(set2 != set3);
+    CHECK(set3 != set2);
+
+    CHECK(set2 != set4);
+    CHECK(set4 != set2);
+
+    CHECK(set3 != set4);
+    CHECK(set4 != set3);
+  }
+
+  TEST_CASE("insert pointer") {
+    // Test added mainly to be sure that the code compiles with MSVC
+    std::string value;
+    std::string* value_ptr = &value;
+
+    dice::sparse_map::sparse_set<std::string*> set;
+    set.insert(value_ptr);
+    set.emplace(value_ptr);
+
+    CHECK_EQ(set.size(), 1);
+    CHECK_EQ(**set.begin(), value);
+  }
 }
-
-BOOST_AUTO_TEST_CASE(test_compare) {
-  const dice::sparse_map::sparse_set set1 = {"a", "e", "d", "c", "b"};
-  const dice::sparse_map::sparse_set set1_copy = {"e", "c", "b", "a", "d"};
-  const dice::sparse_map::sparse_set set2 = {"e", "c", "b", "a", "d", "f"};
-  const dice::sparse_map::sparse_set set3 = {"e", "c", "b", "a"};
-  const dice::sparse_map::sparse_set set4 = {"a", "e", "d", "c", "z"};
-
-  BOOST_CHECK(set1 == set1_copy);
-  BOOST_CHECK(set1_copy == set1);
-
-  BOOST_CHECK(set1 != set2);
-  BOOST_CHECK(set2 != set1);
-
-  BOOST_CHECK(set1 != set3);
-  BOOST_CHECK(set3 != set1);
-
-  BOOST_CHECK(set1 != set4);
-  BOOST_CHECK(set4 != set1);
-
-  BOOST_CHECK(set2 != set3);
-  BOOST_CHECK(set3 != set2);
-
-  BOOST_CHECK(set2 != set4);
-  BOOST_CHECK(set4 != set2);
-
-  BOOST_CHECK(set3 != set4);
-  BOOST_CHECK(set4 != set3);
-}
-
-BOOST_AUTO_TEST_CASE(test_insert_pointer) {
-  // Test added mainly to be sure that the code compiles with MSVC
-  std::string value;
-  std::string* value_ptr = &value;
-
-  dice::sparse_map::sparse_set<std::string*> set;
-  set.insert(value_ptr);
-  set.emplace(value_ptr);
-
-  BOOST_CHECK_EQUAL(set.size(), 1);
-  BOOST_CHECK_EQUAL(**set.begin(), value);
-}
-
-/**
- * serialize and deserialize
- */
-BOOST_AUTO_TEST_CASE(test_serialize_deserialize_reserve) {
-  // insert x values values without intermediate resizes; serialize set;
-  // deserialize in new set; check equal. for deserialization,
-  // test it with and without hash compatibility.
-  for (std::size_t nb_values : {0, 1, 3, 17, 1000}) {
-    dice::sparse_map::sparse_set set;
-    set.reserve(nb_values);
-    for (std::size_t i = 0; i < nb_values; i++) {
-      set.insert(utils::get_key(i));
-    }
-
-    serializer serial;
-    set.serialize(serial);
-
-    deserializer dserial(serial.str());
-    auto set_deserialized = decltype(set)::deserialize(dserial, true);
-    BOOST_CHECK(set == set_deserialized);
-
-    deserializer dserial2(serial.str());
-    set_deserialized = decltype(set)::deserialize(dserial2, false);
-    BOOST_CHECK(set_deserialized == set);
-  }
-}
-
-BOOST_AUTO_TEST_CASE(test_serialize_deserialize) {
-  // insert x values; delete some values; serialize set; deserialize in new
-  // set; check equal. for deserialization, test it with and without hash
-  // compatibility.
-  for (std::size_t nb_values : {0, 1, 3, 17, 1000}) {
-    dice::sparse_map::sparse_set set;
-    for (std::size_t i = 0; i < nb_values + 40; i++) {
-      set.insert(utils::get_key(i));
-    }
-
-    for (std::size_t i = nb_values; i < nb_values + 40; i++) {
-      set.erase(utils::get_key(i));
-    }
-    BOOST_CHECK_EQUAL(set.size(), nb_values);
-
-    serializer serial;
-    set.serialize(serial);
-
-    deserializer dserial(serial.str());
-    auto set_deserialized = decltype(set)::deserialize(dserial, true);
-    BOOST_CHECK(set == set_deserialized);
-
-    deserializer dserial2(serial.str());
-    set_deserialized = decltype(set)::deserialize(dserial2, false);
-    BOOST_CHECK(set_deserialized == set);
-  }
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/tsl-sparse-map.natvis b/tsl-sparse-map.natvis
deleted file mode 100644
index 876ebfc..0000000
--- a/tsl-sparse-map.natvis
+++ /dev/null
@@ -1,73 +0,0 @@
-
-
-
-
-
-
-
-
-  {{ size={m_ht.m_nb_elements} }}
-
-    m_ht.m_bucket_count
-    ((float)m_ht.m_nb_elements) / ((float)m_ht.m_bucket_count)
-    0
-    m_ht.m_max_load_factor
-
-
-
-
-
-      m_ht.m_nb_elements
-
-
-
-        *element
-        ++element
-        --num_elements
-
-
-        ++bucket
-        element = bucket->m_values
-        num_elements = bucket->m_nb_elements
-
-
-
-
-
-
-
-
-  {{ size={m_ht.m_nb_elements} }}
-
-    m_ht.m_bucket_count
-    ((float)m_ht.m_nb_elements) / ((float)m_ht.m_bucket_count)
-    0
-    m_ht.m_max_load_factor
-
-
-
-
-
-      m_ht.m_nb_elements
-
-
-
-        *element
-        ++element
-        --num_elements
-
-
-        ++bucket
-        element = bucket->m_values
-        num_elements = bucket->m_nb_elements
-
-
-
-
-
-
-
-  {*m_sparse_array_it}
-  end
-
-  *m_sparse_array_it
-
-