
Try mingw compatibility

FastNoiseSIMD is not usable
mingw
May B. 2020-10-24 17:23:18 +02:00
parent 71a52bd7e3
commit 114833d20c
30 changed files with 463 additions and 151 deletions

View File

@ -114,6 +114,8 @@ uint64_t xgetbv(unsigned int index) {
__asm__ __volatile__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(index));
return ((uint64_t)edx << 32) | eax;
}
#endif
#ifndef _XCR_XFEATURE_ENABLED_MASK
#define _XCR_XFEATURE_ENABLED_MASK 0
#endif
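For context, a minimal usage sketch (not part of the commit; the helper name osSavesAvxState is illustrative) of how an xgetbv fallback like this is typically combined with the _XCR_XFEATURE_ENABLED_MASK define when probing AVX support: CPUID may report AVX, but the OS must also have enabled XSAVE state for the YMM registers.

static bool osSavesAvxState() {
    // index 0 (_XCR_XFEATURE_ENABLED_MASK) reads XCR0
    uint64_t xcr0 = xgetbv(_XCR_XFEATURE_ENABLED_MASK);
    // bit 1 = XMM/SSE state, bit 2 = YMM/AVX state; both must be OS-managed
    return (xcr0 & 0x6) == 0x6;
}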

View File

@ -84,9 +84,10 @@ add_library(enet INTERFACE)
target_include_directories(enet INTERFACE include)
add_library(enet_static STATIC ${INCLUDE_FILES} ${SOURCE_FILES})
target_link_libraries(enet_static PUBLIC enet)
if (MINGW)
target_link_libraries(enet_static PUBLIC enet winmm ws2_32)
else()
target_link_libraries(enet_static PUBLIC enet)
endif()
add_library(enet::enet_static ALIAS enet_static)
if (MINGW)
target_link_libraries(enet_static winmm ws2_32)
endif()

View File

@ -6,7 +6,6 @@
// _/_____/
//
// Fast & memory efficient hashtable based on robin hood hashing for C++11/14/17/20
// version 3.8.0
// https://github.com/martinus/robin-hood-hashing
//
// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
@ -36,7 +35,7 @@
// see https://semver.org/
#define ROBIN_HOOD_VERSION_MAJOR 3 // for incompatible API changes
#define ROBIN_HOOD_VERSION_MINOR 8 // for adding functionality in a backwards-compatible manner
#define ROBIN_HOOD_VERSION_MINOR 9 // for adding functionality in a backwards-compatible manner
#define ROBIN_HOOD_VERSION_PATCH 0 // for backwards-compatible bug fixes
#include <algorithm>
@ -51,6 +50,9 @@
#if __cplusplus >= 201703L
# include <string_view>
#endif
#if defined(__aarch64__)
# include <sys/auxv.h> // for getauxval
#endif
// #define ROBIN_HOOD_LOG_ENABLED
#ifdef ROBIN_HOOD_LOG_ENABLED
@ -132,46 +134,32 @@ static Counts& counts() {
#endif
// count leading/trailing bits
#if ((defined __i386 || defined __x86_64__) && defined __BMI__) || defined _M_IX86 || defined _M_X64
#if !defined(ROBIN_HOOD_DISABLE_INTRINSICS)
# ifdef _MSC_VER
# if ROBIN_HOOD(BITNESS) == 32
# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward
# else
# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward64
# endif
# include <intrin.h>
# pragma intrinsic(ROBIN_HOOD(BITSCANFORWARD))
# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) \
[](size_t mask) noexcept -> int { \
unsigned long index; \
return ROBIN_HOOD(BITSCANFORWARD)(&index, mask) ? static_cast<int>(index) \
: ROBIN_HOOD(BITNESS); \
}(x)
# else
# include <x86intrin.h>
# if ROBIN_HOOD(BITNESS) == 32
# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzl
# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzl
# else
# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzll
# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzll
# endif
# define ROBIN_HOOD_COUNT_LEADING_ZEROES(x) ((x) ? ROBIN_HOOD(CLZ)(x) : ROBIN_HOOD(BITNESS))
# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) ((x) ? ROBIN_HOOD(CTZ)(x) : ROBIN_HOOD(BITNESS))
# endif
# if ROBIN_HOOD(BITNESS) == 32
# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() _tzcnt_u32
# else
# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() _tzcnt_u64
# endif
# if defined __AVX2__ || defined __BMI__
# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) ROBIN_HOOD(CTZ)(x)
# else
# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) ROBIN_HOOD(CTZ)(x)
# endif
#elif defined _MSC_VER
# if ROBIN_HOOD(BITNESS) == 32
# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward
# else
# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward64
# endif
# include <intrin.h>
# pragma intrinsic(ROBIN_HOOD(BITSCANFORWARD))
# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) \
[](size_t mask) noexcept -> int { \
unsigned long index; \
return ROBIN_HOOD(BITSCANFORWARD)(&index, mask) ? static_cast<int>(index) \
: ROBIN_HOOD(BITNESS); \
}(x)
#else
# if ROBIN_HOOD(BITNESS) == 32
# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzl
# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzl
# else
# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzll
# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzll
# endif
# define ROBIN_HOOD_COUNT_LEADING_ZEROES(x) ((x) ? ROBIN_HOOD(CLZ)(x) : ROBIN_HOOD(BITNESS))
# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) ((x) ? ROBIN_HOOD(CTZ)(x) : ROBIN_HOOD(BITNESS))
#endif
// fallthrough
@ -195,6 +183,17 @@ static Counts& counts() {
# define ROBIN_HOOD_UNLIKELY(condition) __builtin_expect(condition, 0)
#endif
// detect if native wchar_t type is available in MSVC
#ifdef _MSC_VER
# ifdef _NATIVE_WCHAR_T_DEFINED
# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1
# else
# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 0
# endif
#else
# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1
#endif
// workaround missing "is_trivially_copyable" in g++ < 5.0
// See https://stackoverflow.com/a/31798726/48181
#if defined(__GNUC__) && __GNUC__ < 5
@ -216,6 +215,43 @@ static Counts& counts() {
# define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD()
#endif
// detect hardware CRC availability.
#if !defined(ROBIN_HOOD_DISABLE_INTRINSICS)
// only use CRC for 64bit targets
# if ROBIN_HOOD(BITNESS) == 64 && \
(defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32) || defined(_MSC_VER))
# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_CRC32() 1
# if defined(__ARM_NEON) || defined(__ARM_NEON__) || defined(_M_ARM64)
# ifdef _M_ARM64
# include <arm64_neon.h>
# else
# include <arm_acle.h>
# endif
# define ROBIN_HOOD_CRC32_64(crc, v) \
static_cast<uint64_t>( \
__crc32cd(static_cast<uint32_t>(crc), static_cast<uint64_t>(v)))
# define ROBIN_HOOD_CRC32_32(crc, v) \
__crc32cw(static_cast<uint32_t>(crc), static_cast<uint32_t>(v))
# else
# include <nmmintrin.h>
# define ROBIN_HOOD_CRC32_64(crc, v) \
static_cast<uint64_t>( \
_mm_crc32_u64(static_cast<uint64_t>(crc), static_cast<uint64_t>(v)))
# define ROBIN_HOOD_CRC32_32(crc, v) \
_mm_crc32_u32(static_cast<uint32_t>(crc), static_cast<uint32_t>(v))
# endif
# else
# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_CRC32() 0
# endif
# if defined(_MSC_VER)
# include <intrin.h>
# endif
#else
# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_CRC32() 0
#endif
namespace robin_hood {
#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14)
@ -294,6 +330,13 @@ using index_sequence_for = make_index_sequence<sizeof...(T)>;
namespace detail {
// make sure we static_cast to the correct type for hash_int
#if ROBIN_HOOD(BITNESS) == 64
using SizeT = uint64_t;
#else
using SizeT = uint32_t;
#endif
template <typename T>
T rotr(T x, unsigned k) {
return (x >> k) | (x << (8U * sizeof(T) - k));
@ -388,7 +431,7 @@ public:
void reset() noexcept {
while (mListForFree) {
T* tmp = *mListForFree;
free(mListForFree);
std::free(mListForFree);
mListForFree = reinterpret_cast_no_cast_align_warning<T**>(tmp);
}
mHead = nullptr;
@ -423,7 +466,7 @@ public:
// calculate number of available elements in ptr
if (numBytes < ALIGNMENT + ALIGNED_SIZE) {
// not enough data for at least one element. Free and return.
free(ptr);
std::free(ptr);
} else {
add(ptr, numBytes);
}
@ -490,7 +533,7 @@ private:
// alloc new memory: [prev |T, T, ... T]
// std::cout << (sizeof(T*) + ALIGNED_SIZE * numElementsToAlloc) << " bytes" << std::endl;
size_t const bytes = ALIGNMENT + ALIGNED_SIZE * numElementsToAlloc;
add(assertNotNull<std::bad_alloc>(malloc(bytes)), bytes);
add(assertNotNull<std::bad_alloc>(std::malloc(bytes)), bytes);
return mHead;
}
@ -526,7 +569,7 @@ struct NodeAllocator<T, MinSize, MaxSize, true> {
// we are not using the data, so just free it.
void addOrFree(void* ptr, size_t ROBIN_HOOD_UNUSED(numBytes) /*unused*/) noexcept {
free(ptr);
std::free(ptr);
}
};
@ -670,7 +713,23 @@ inline constexpr bool operator>=(pair<A, B> const& x, pair<A, B> const& y) {
return !(x < y);
}
inline size_t hash_bytes(void const* ptr, size_t const len) noexcept {
namespace detail {
static size_t fallback_hash_int(uint64_t x) noexcept {
// inspired by lemire's strongly universal hashing
// https://lemire.me/blog/2018/08/15/fast-strongly-universal-64-bit-hashing-everywhere/
//
// Instead of shifts, we use rotations so we don't lose any bits.
//
// Added a final multiplication with a constant for more mixing. It is most important that
// the lower bits are well mixed.
auto h1 = x * UINT64_C(0xA24BAED4963EE407);
auto h2 = detail::rotr(x, 32U) * UINT64_C(0x9FB21C651E98DF25);
auto h = detail::rotr(h1 + h2, 32U);
return static_cast<size_t>(h);
}
static size_t fallback_hash_bytes(void const* ptr, size_t const len) noexcept {
static constexpr uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
static constexpr uint64_t seed = UINT64_C(0xe17a1465);
static constexpr unsigned int r = 47;
@ -724,29 +783,248 @@ inline size_t hash_bytes(void const* ptr, size_t const len) noexcept {
return static_cast<size_t>(h);
}
#if ROBIN_HOOD(HAS_CRC32)
# ifndef _M_ARM64
// see e.g.
// https://github.com/simdjson/simdjson/blob/9863f62321f59d73c7731d4ada2d7c4ed6a0a251/src/isadetection.h
static inline void cpuid(uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* edx) {
# if defined(_MSC_VER)
int cpuInfo[4];
__cpuid(cpuInfo, static_cast<int>(*eax));
*eax = static_cast<uint32_t>(cpuInfo[0]);
*ebx = static_cast<uint32_t>(cpuInfo[1]);
*ecx = static_cast<uint32_t>(cpuInfo[2]);
*edx = static_cast<uint32_t>(cpuInfo[3]);
# else
uint32_t a = *eax;
uint32_t b{};
uint32_t c = *ecx;
uint32_t d{};
// NOLINTNEXTLINE(hicpp-no-assembler)
asm volatile("cpuid\n\t" : "+a"(a), "=b"(b), "+c"(c), "=d"(d));
*eax = a;
*ebx = b;
*ecx = c;
*edx = d;
# endif
}
# endif
inline bool hasCrc32Support() noexcept {
# if defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
uint32_t eax{};
uint32_t ebx{};
uint32_t ecx{};
uint32_t edx{};
// CPUID leaf EAX=0x1: feature flags are returned in ECX/EDX
eax = 0x1;
cpuid(&eax, &ebx, &ecx, &edx);
// check SSE4.2
return 0U != (ecx & (1U << 20U));
# elif defined(__aarch64__)
auto hwcap = getauxval(AT_HWCAP);
if (hwcap != ENOENT) {
// HWCAP_CRC32 is not necessarily defined, so hardcode it.
// see https://github.com/torvalds/linux/blob/master/arch/arm64/include/uapi/asm/hwcap.h
return (hwcap & (1U << 7U)) != 0;
}
# elif defined(_M_ARM64)
return true;
# endif
return false;
}
inline size_t hash_bytes_1_to_16(void const* ptr, size_t len, uint64_t seed) noexcept {
// random odd 64bit constants
static constexpr uint64_t c1 = UINT64_C(0x38a3affe8230452c);
static constexpr uint64_t c2 = UINT64_C(0xd55c04dccfde5383);
auto const* d8 = reinterpret_cast<uint8_t const*>(ptr);
if (len > 8) {
// 9-16 bytes
auto h1 = ROBIN_HOOD_CRC32_64(seed, detail::unaligned_load<uint64_t>(d8));
auto h2 = ROBIN_HOOD_CRC32_64(seed, detail::unaligned_load<uint64_t>(d8 + len - 8));
return h1 * c1 + h2 * c2;
}
uint64_t input{};
if (len <= 4) {
uint64_t a = d8[0]; // 0, 0, 0, 0
uint64_t b = d8[(len - 1) / 2]; // 0, 0, 1, 1
uint64_t c = d8[len / 2]; // 0, 1, 1, 2
uint64_t d = d8[len - 1]; // 0, 1, 2, 3
input = (a << 24U) | (b << 16U) | (c << 8U) | d;
} else {
// 5-8 bytes
uint64_t a = detail::unaligned_load<uint32_t>(d8);
uint64_t b = detail::unaligned_load<uint32_t>(d8 + len - 4);
input = (a << 32U) | b;
}
return ROBIN_HOOD_CRC32_64(seed, input) * c1;
}
inline size_t hash_bytes_8_to_xxx(void const* ptr, size_t len, uint64_t seed) {
auto const* d8 = reinterpret_cast<uint8_t const*>(ptr);
static constexpr auto bs = 128U;
uint64_t h1 = seed;
uint64_t h2 = seed;
uint64_t h3 = seed;
uint64_t h4 = seed;
auto next = d8;
auto numBlocks = (len - 1) / bs;
auto end = d8 + numBlocks * bs;
while (next != end) {
h1 = ROBIN_HOOD_CRC32_64(h1, detail::unaligned_load<uint64_t>(next + 0U));
h2 = ROBIN_HOOD_CRC32_64(h2, detail::unaligned_load<uint64_t>(next + 8U));
h3 = ROBIN_HOOD_CRC32_64(h3, detail::unaligned_load<uint64_t>(next + 16U));
h4 = ROBIN_HOOD_CRC32_64(h4, detail::unaligned_load<uint64_t>(next + 24U));
h1 = ROBIN_HOOD_CRC32_64(h1, detail::unaligned_load<uint64_t>(next + 32U + 0U));
h2 = ROBIN_HOOD_CRC32_64(h2, detail::unaligned_load<uint64_t>(next + 32U + 8U));
h3 = ROBIN_HOOD_CRC32_64(h3, detail::unaligned_load<uint64_t>(next + 32U + 16U));
h4 = ROBIN_HOOD_CRC32_64(h4, detail::unaligned_load<uint64_t>(next + 32U + 24U));
h1 = ROBIN_HOOD_CRC32_64(h1, detail::unaligned_load<uint64_t>(next + 64U + 0U));
h2 = ROBIN_HOOD_CRC32_64(h2, detail::unaligned_load<uint64_t>(next + 64U + 8U));
h3 = ROBIN_HOOD_CRC32_64(h3, detail::unaligned_load<uint64_t>(next + 64U + 16U));
h4 = ROBIN_HOOD_CRC32_64(h4, detail::unaligned_load<uint64_t>(next + 64U + 24U));
h1 = ROBIN_HOOD_CRC32_64(h1, detail::unaligned_load<uint64_t>(next + 96U + 0U));
h2 = ROBIN_HOOD_CRC32_64(h2, detail::unaligned_load<uint64_t>(next + 96U + 8U));
h3 = ROBIN_HOOD_CRC32_64(h3, detail::unaligned_load<uint64_t>(next + 96U + 16U));
h4 = ROBIN_HOOD_CRC32_64(h4, detail::unaligned_load<uint64_t>(next + 96U + 24U));
next += bs;
}
auto remainingBytes = len - (numBlocks * bs);
auto numBlocks8 = (remainingBytes + 7U) / 8U;
end += numBlocks8 * 8;
switch (numBlocks8) {
case 16:
h1 = ROBIN_HOOD_CRC32_64(h1, detail::unaligned_load<uint64_t>(end - 128U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 15:
h2 = ROBIN_HOOD_CRC32_64(h2, detail::unaligned_load<uint64_t>(end - 120U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 14:
h3 = ROBIN_HOOD_CRC32_64(h3, detail::unaligned_load<uint64_t>(end - 112U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 13:
h4 = ROBIN_HOOD_CRC32_64(h4, detail::unaligned_load<uint64_t>(end - 104U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 12:
h1 = ROBIN_HOOD_CRC32_64(h1, detail::unaligned_load<uint64_t>(end - 96U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 11:
h2 = ROBIN_HOOD_CRC32_64(h2, detail::unaligned_load<uint64_t>(end - 88U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 10:
h3 = ROBIN_HOOD_CRC32_64(h3, detail::unaligned_load<uint64_t>(end - 80U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 9:
h4 = ROBIN_HOOD_CRC32_64(h4, detail::unaligned_load<uint64_t>(end - 72U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 8:
h1 = ROBIN_HOOD_CRC32_64(h1, detail::unaligned_load<uint64_t>(end - 64U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 7:
h2 = ROBIN_HOOD_CRC32_64(h2, detail::unaligned_load<uint64_t>(end - 56U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 6:
h3 = ROBIN_HOOD_CRC32_64(h3, detail::unaligned_load<uint64_t>(end - 48U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 5:
h4 = ROBIN_HOOD_CRC32_64(h4, detail::unaligned_load<uint64_t>(end - 40U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 4:
h1 = ROBIN_HOOD_CRC32_64(h1, detail::unaligned_load<uint64_t>(end - 32U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 3:
h2 = ROBIN_HOOD_CRC32_64(h2, detail::unaligned_load<uint64_t>(end - 24U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
case 2:
h3 = ROBIN_HOOD_CRC32_64(h3, detail::unaligned_load<uint64_t>(end - 16U));
ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
default:
// make sure that we don't skip past the real length with the last one
h4 = ROBIN_HOOD_CRC32_64(h4, detail::unaligned_load<uint64_t>(d8 + len - 8U));
break;
}
// how best to combine h1 to h4? Multiplying and summing with a random odd number seems to be
// very fast and leads to few collisions.
return h1 * 0x38a3affe8230452c + h2 * 0xd55c04dccfde5383 + h3 * 0xd348c89fbf80760f +
h4 * 0xdc105301318a46f3;
}
#endif
} // namespace detail
inline size_t hash_bytes(void const* ptr, size_t len) noexcept {
if (len == 0) {
return 0;
}
#if ROBIN_HOOD(HAS_CRC32) && ROBIN_HOOD(BITNESS) == 64
static bool const hasCrc = detail::hasCrc32Support();
if (ROBIN_HOOD_LIKELY(hasCrc)) {
auto seed = len * UINT64_C(0xf012a09363e97a8f);
if (len <= 16U) {
return detail::hash_bytes_1_to_16(ptr, len, seed);
}
return detail::hash_bytes_8_to_xxx(ptr, len, seed);
}
#endif
return detail::fallback_hash_bytes(ptr, len);
}
inline size_t hash_int(uint64_t x) noexcept {
// inspired by lemire's strongly universal hashing
// https://lemire.me/blog/2018/08/15/fast-strongly-universal-64-bit-hashing-everywhere/
//
// Instead of shifts, we use rotations so we don't lose any bits.
//
// Added a final multiplication with a constant for more mixing. It is most important that the
// lower bits are well mixed.
auto h1 = x * UINT64_C(0xA24BAED4963EE407);
auto h2 = detail::rotr(x, 32U) * UINT64_C(0x9FB21C651E98DF25);
auto h = detail::rotr(h1 + h2, 32U);
return static_cast<size_t>(h);
#if ROBIN_HOOD(HAS_CRC32)
static bool const hasCrc = detail::hasCrc32Support();
if (ROBIN_HOOD_LIKELY(hasCrc)) {
# if ROBIN_HOOD(BITNESS) == 64
// rotr 32 results in a bad hash when hash_int is applied twice.
return ROBIN_HOOD_CRC32_64(0, x ^ UINT64_C(0xA24BAED4963EE407)) ^
(ROBIN_HOOD_CRC32_64(0, x) << 32U);
# else
return ROBIN_HOOD_CRC32_32(ROBIN_HOOD_CRC32_32(0, static_cast<uint32_t>(x)),
static_cast<uint32_t>(x >> 32U));
# endif
}
#endif
return detail::fallback_hash_int(x);
}
inline size_t hash_int(uint32_t x) noexcept {
#if ROBIN_HOOD(HAS_CRC32)
static bool const hasCrc = detail::hasCrc32Support();
if (ROBIN_HOOD_LIKELY(hasCrc)) {
// rotr 32 results in a bad hash when hash_int is applied twice.
return ROBIN_HOOD_CRC32_32(0, x);
}
#endif
return detail::fallback_hash_int(x);
}
// A thin wrapper around std::hash, performing an additional simple mixing step of the result.
template <typename T>
template <typename T, typename Enable = void>
struct hash : public std::hash<T> {
size_t operator()(T const& obj) const
noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>()))) {
// call base hash
auto result = std::hash<T>::operator()(obj);
// return a mix of that, to be safe against identity hash
return hash_int(static_cast<uint64_t>(result));
return hash_int(static_cast<detail::SizeT>(result));
}
};
@ -769,30 +1047,40 @@ struct hash<std::basic_string_view<CharT>> {
template <class T>
struct hash<T*> {
size_t operator()(T* ptr) const noexcept {
return hash_int(reinterpret_cast<size_t>(ptr));
return hash_int(reinterpret_cast<detail::SizeT>(ptr));
}
};
template <class T>
struct hash<std::unique_ptr<T>> {
size_t operator()(std::unique_ptr<T> const& ptr) const noexcept {
return hash_int(reinterpret_cast<size_t>(ptr.get()));
return hash_int(reinterpret_cast<detail::SizeT>(ptr.get()));
}
};
template <class T>
struct hash<std::shared_ptr<T>> {
size_t operator()(std::shared_ptr<T> const& ptr) const noexcept {
return hash_int(reinterpret_cast<size_t>(ptr.get()));
return hash_int(reinterpret_cast<detail::SizeT>(ptr.get()));
}
};
#define ROBIN_HOOD_HASH_INT(T) \
template <> \
struct hash<T> { \
size_t operator()(T const& obj) const noexcept { \
return hash_int(static_cast<uint64_t>(obj)); \
} \
template <typename Enum>
struct hash<Enum, typename std::enable_if<std::is_enum<Enum>::value>::type> {
size_t operator()(Enum e) const noexcept {
using Underlying = typename std::underlying_type<Enum>::type;
return hash<Underlying>{}(static_cast<Underlying>(e));
}
};
#define ROBIN_HOOD_HASH_INT(T) \
template <> \
struct hash<T> { \
size_t operator()(T const& obj) const noexcept { \
using Type = \
std::conditional<sizeof(T) <= sizeof(uint32_t), uint32_t, uint64_t>::type; \
return hash_int(static_cast<Type>(obj)); \
} \
}
#if defined(__GNUC__) && !defined(__clang__)
@ -806,7 +1094,9 @@ ROBIN_HOOD_HASH_INT(signed char);
ROBIN_HOOD_HASH_INT(unsigned char);
ROBIN_HOOD_HASH_INT(char16_t);
ROBIN_HOOD_HASH_INT(char32_t);
#if ROBIN_HOOD(HAS_NATIVE_WCHART)
ROBIN_HOOD_HASH_INT(wchar_t);
#endif
ROBIN_HOOD_HASH_INT(short);
ROBIN_HOOD_HASH_INT(unsigned short);
ROBIN_HOOD_HASH_INT(int);
@ -914,7 +1204,8 @@ private:
static constexpr size_t InitialNumElements = sizeof(uint64_t);
static constexpr uint32_t InitialInfoNumBits = 5;
static constexpr uint8_t InitialInfoInc = 1U << InitialInfoNumBits;
static constexpr uint8_t InitialInfoHashShift = sizeof(size_t) * 8 - InitialInfoNumBits;
static constexpr size_t InfoMask = InitialInfoInc - 1U;
static constexpr uint8_t InitialInfoHashShift = 0;
using DataPool = detail::NodeAllocator<value_type, 4, 16384, IsFlat>;
// type needs to be wider than uint8_t.
@ -1247,7 +1538,7 @@ private:
Iter operator++(int) noexcept {
Iter tmp = *this;
++(*this);
return std::move(tmp);
return tmp;
}
reference operator*() const {
@ -1278,13 +1569,29 @@ private:
mInfo += sizeof(size_t);
mKeyVals += sizeof(size_t);
}
#if ROBIN_HOOD(LITTLE_ENDIAN)
auto inc = ROBIN_HOOD_COUNT_TRAILING_ZEROES(n) / 8;
#if defined(ROBIN_HOOD_DISABLE_INTRINSICS)
// we know for certain that within the next 8 bytes we'll find a non-zero one.
if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load<uint32_t>(mInfo))) {
mInfo += 4;
mKeyVals += 4;
}
if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load<uint16_t>(mInfo))) {
mInfo += 2;
mKeyVals += 2;
}
if (ROBIN_HOOD_UNLIKELY(0U == *mInfo)) {
mInfo += 1;
mKeyVals += 1;
}
#else
# if ROBIN_HOOD(LITTLE_ENDIAN)
auto inc = ROBIN_HOOD_COUNT_TRAILING_ZEROES(n) / 8;
# else
auto inc = ROBIN_HOOD_COUNT_LEADING_ZEROES(n) / 8;
#endif
# endif
mInfo += inc;
mKeyVals += inc;
#endif
}
friend class Table<IsFlat, MaxLoadFactor100, key_type, mapped_type, hasher, key_equal>;
@ -1306,10 +1613,11 @@ private:
typename std::conditional<std::is_same<::robin_hood::hash<key_type>, hasher>::value,
::robin_hood::detail::identity_hash<size_t>,
::robin_hood::hash<size_t>>::type;
*idx = Mix{}(WHash::operator()(key));
*info = mInfoInc + static_cast<InfoType>(*idx >> mInfoHashShift);
*idx &= mMask;
// the lower InitialInfoNumBits are reserved for info.
auto h = Mix{}(WHash::operator()(key));
*info = mInfoInc + static_cast<InfoType>((h & InfoMask) >> mInfoHashShift);
*idx = (h >> InitialInfoNumBits) & mMask;
}
// forwards the index by one, wrapping around at the end
@ -1535,7 +1843,7 @@ public:
auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1);
mKeyVals = static_cast<Node*>(detail::assertNotNull<std::bad_alloc>(
malloc(calcNumBytesTotal(numElementsWithBuffer))));
std::malloc(calcNumBytesTotal(numElementsWithBuffer))));
// no need for calloc because cloneData does memcpy
mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer);
mNumElements = o.mNumElements;
@ -1583,12 +1891,12 @@ public:
// no luck: we don't have the same array size allocated, so we need to realloc.
if (0 != mMask) {
// only deallocate if we actually have data!
free(mKeyVals);
std::free(mKeyVals);
}
auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1);
mKeyVals = static_cast<Node*>(detail::assertNotNull<std::bad_alloc>(
malloc(calcNumBytesTotal(numElementsWithBuffer))));
std::malloc(calcNumBytesTotal(numElementsWithBuffer))));
// no need for calloc here because cloneData performs a memcpy.
mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer);
@ -2103,7 +2411,7 @@ private:
// calloc also zeroes everything
mKeyVals = reinterpret_cast<Node*>(detail::assertNotNull<std::bad_alloc>(
calloc(1, calcNumBytesTotal(numElementsWithBuffer))));
std::calloc(1, calcNumBytesTotal(numElementsWithBuffer))));
mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer);
// set sentinel
@ -2290,7 +2598,7 @@ private:
// reports a compile error: attempt to free a non-heap object fm
// [-Werror=free-nonheap-object]
if (mKeyVals != reinterpret_cast_no_cast_align_warning<Node*>(&mMask)) {
free(mKeyVals);
std::free(mKeyVals);
}
}

View File

@ -165,7 +165,7 @@ void Client::run(server_handle* const localHandle) {
reports.tris_count += pass(buffer, model, glm::vec4(pos, std::get<1>(area)), std::get<2>(area));
};
if (options.culling > 0) {
state.contouring->getModels(draw, player.position, options.camera.far, occlusion, offset, options.voxel_density, true);
state.contouring->getModels(draw, player.position, options.camera.farDist, occlusion, offset, options.voxel_density, true);
} else {
state.contouring->getModels(draw, frustum, offset, options.voxel_density, true);
}
@ -177,7 +177,7 @@ void Client::run(server_handle* const localHandle) {
reports.tris_count += pass(buffer, model, glm::vec4(pos, std::get<1>(area)), std::get<2>(area));
};
if (options.culling > 0) {
state.contouring->getModels(draw, player.position, options.camera.far, occlusion, offset, options.voxel_density, false);
state.contouring->getModels(draw, player.position, options.camera.farDist, occlusion, offset, options.voxel_density, false);
} else {
state.contouring->getModels(draw, frustum, offset, options.voxel_density, false);
}

View File

@ -54,8 +54,8 @@ public:
contouring = config["contouring"].value_or(std::string(""));
camera.far = config["camera"]["far"].value_or(camera.far);
camera.near = config["camera"]["near"].value_or(camera.near);
camera.farDist = config["camera"]["far"].value_or(camera.farDist);
camera.nearDist = config["camera"]["near"].value_or(camera.nearDist);
camera.fov = config["camera"]["fov"].value_or(camera.fov);
control.sensibility = config["control"]["sensibility"].value_or(control.sensibility);
control.speed = config["control"]["speed"].value_or(control.speed);
@ -127,8 +127,8 @@ public:
}));
config.insert_or_assign("contouring", contouring);
config.insert_or_assign("camera", toml::table({
{"far", camera.far},
{"near", camera.near},
{"far", camera.farDist},
{"near", camera.nearDist},
{"fov", camera.fov}
}));
config.insert_or_assign("control", toml::table({

View File

@ -36,7 +36,7 @@ namespace contouring {
loadedLevels.push_back(LEVELS[i]);
}
for (size_t i = 1; i <= std::max<uint>(1, std::thread::hardware_concurrency() / 2 - 1); i++) {
for (size_t i = 1; i <= std::max<uint32_t>(1, std::thread::hardware_concurrency() / 2 - 1); i++) {
workers.emplace_back([&] {
#if TRACY_ENABLE
tracy::SetThreadName("Contouring");

View File

@ -54,8 +54,8 @@ namespace contouring {
void enqueue(const area_<chunk_pos> &, const chunk_pos &offset, const world::ChunkContainer &);
ushort loadDistance = 3;
ushort keepDistance = 4;
uint16_t loadDistance = 3;
uint16_t keepDistance = 4;
bool transparency = false;
float iso = .1f;
bool manifold = true;
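The ushort/uint replacements here and throughout the commit address the same portability issue: those names are BSD/POSIX typedefs from <sys/types.h>, not standard C++, and MinGW's headers do not reliably provide them. A minimal illustration of the portable form (variable names are placeholders):

#include <cstdint>         // fixed-width types, available on MinGW as well
// #include <sys/types.h>  // POSIX home of ushort/uint; not guaranteed on Windows
uint16_t loadDistance = 3; // instead of: ushort loadDistance = 3;
uint32_t density = 1;      // instead of: uint density = 1;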

View File

@ -21,7 +21,7 @@ namespace dualmc {
typedef float VertexComponentsType;
typedef uint PropertyType;
typedef uint32_t PropertyType;
typedef uint32_t QuadIndexType;
typedef uint32_t TriIndexType;
@ -101,7 +101,7 @@ public:
Point const *data,
int32_t const dimX, int32_t const dimY, int32_t const dimZ,
VolumeDataType const iso,
ushort const *textures_map,
uint16_t const *textures_map,
float const *roughness,
bool const generateManifold,
std::vector<Vertex> &vertices,
@ -188,7 +188,7 @@ protected:
Point const *data;
/// point to vertex property table
ushort const *textures_map;
uint16_t const *textures_map;
/// property roughness table
float const *roughness;
@ -543,7 +543,7 @@ void DualMC<T>::buildTris(
Point const * data,
int32_t const dimX, int32_t const dimY, int32_t const dimZ,
VolumeDataType const iso,
ushort const * textures_map,
uint16_t const * textures_map,
float const * roughness,
bool const generateManifold,
std::vector<Vertex> & vertices,

View File

@ -9,7 +9,7 @@ Camera::Camera(const Controllable* origin, const Camera::options& opt): origin(o
Camera::~Camera() { }
void Camera::updateProjection() {
ProjectionMatrix = glm::perspective(o.fov, Window::RATIO, o.near, o.far);
ProjectionMatrix = glm::perspective(o.fov, Window::RATIO, o.nearDist, o.farDist);
}
void Camera::update() {

View File

@ -9,8 +9,8 @@ class Camera {
public:
struct options {
float fov = glm::radians(70.f);
float near = 0.1;
float far = 64;
float nearDist = 0.1f;
float farDist = 64.f;
};
Camera(const Controllable*, const options&);
@ -26,11 +26,11 @@ public:
}
inline geometry::Frustum getFrustum() const { return geometry::Frustum(ViewMatrix, ProjectionMatrix); }
inline geometry::Ray getRay() const { return geometry::Ray(origin->position, origin->getDirection(), o.far); }
inline geometry::Ray getRay() const { return geometry::Ray(origin->position, origin->getDirection(), o.farDist); }
constexpr glm::mat4 getViewMatrix() const { return ViewMatrix; }
constexpr glm::mat4 getProjectionMatrix() const { return ProjectionMatrix; }
constexpr float getDepth() const { return o.far; }
constexpr float getDepth() const { return o.farDist; }
private:
const Controllable* origin;
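A likely reason for the near/far to nearDist/farDist rename, sketched here under the assumption that <windows.h> ends up included in the affected translation units: legacy windef.h defines near and far as empty macros, so any member carrying those names stops compiling on MinGW.

#include <windows.h>   // windef.h: "#define near" and "#define far" (both empty)
struct options {
    float near = 0.1f; // preprocessed to "float  = 0.1f;" -> syntax error
    float far = 64.f;  // same problem, hence nearDist / farDist
};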

View File

@ -20,7 +20,7 @@ UI::UI() {
for(auto file: std::filesystem::directory_iterator("content/textures/")) {
if(file.is_directory() && file.path().filename() != "ui")
texturePacks.push_back(file.path().filename());
texturePacks.push_back(file.path().filename().string());
}
}
UI::~UI() {
@ -187,17 +187,17 @@ UI::Actions UI::draw(config::client::options &options, state::state &state, cons
{
bool changePerspective = false;
changePerspective |= ImGui::SliderAngle("FoV", &options.camera.fov, 30, 110);
changePerspective |= ImGui::SliderFloat("Near", &options.camera.near, 0.01, 10);
changePerspective |= ImGui::SliderFloat("Far", &options.camera.far, farRange.first / options.voxel_density, farRange.second / options.voxel_density);
changePerspective |= ImGui::SliderFloat("Near", &options.camera.nearDist, 0.01, 10);
changePerspective |= ImGui::SliderFloat("Far", &options.camera.farDist, farRange.first / options.voxel_density, farRange.second / options.voxel_density);
if(changePerspective) {
actions |= Actions::Camera;
}
}
ImGui::End();
}
const auto far = std::clamp(options.camera.far, farRange.first / options.voxel_density, farRange.second / options.voxel_density);
if(far != options.camera.far) {
options.camera.far = far;
const auto farDist = std::clamp(options.camera.farDist, farRange.first / options.voxel_density, farRange.second / options.voxel_density);
if(farDist != options.camera.farDist) {
options.camera.farDist = farDist;
actions |= Actions::Camera;
}
}

View File

@ -66,7 +66,7 @@ std::optional<Image::properties> Image::Read(const std::string& imagepath, std::
return {};
}
//FIXME: miplevels with size < block size (the last 2) are corrupted
const uint maxMipmapLevels = 1 + std::floor(std::log2(std::max(info.size.height, info.size.width))) - 2;
const uint32_t maxMipmapLevels = 1 + std::floor(std::log2(std::max(info.size.height, info.size.width))) - 2;
info.mipmapLevels = std::min(maxMipmapLevels, info.mipmapLevels);
return info;

View File

@ -116,7 +116,7 @@ std::unique_ptr<TextureCube> TextureCube::LoadFromFiles(const std::array<std::st
glCreateTextures(GL_TEXTURE_CUBE_MAP, 1, &textureID);
glTextureStorage2D(textureID, 1, format, header.size.width, header.size.height);
ushort layer = 0;
uint16_t layer = 0;
for (auto imagepath = paths.begin(); imagepath != paths.end(); ++imagepath, ++layer) {
data.clear();
if (!render::Image::Read(*imagepath, data).has_value()) {
@ -163,7 +163,7 @@ std::unique_ptr<TextureArray> TextureArray::LoadFromFiles(const std::vector<std:
glCreateTextures(GL_TEXTURE_2D_ARRAY, 1, &textureID);
glTextureStorage3D(textureID, header.mipmapLevels, format, header.size.width, header.size.height, paths.size());
ushort layer = 0;
uint16_t layer = 0;
for (auto imagepath = paths.begin(); imagepath != paths.end(); ++imagepath, ++layer) {
data.clear();
if (!render::Image::Read(*imagepath, data).has_value()) {
@ -172,7 +172,7 @@ std::unique_ptr<TextureArray> TextureArray::LoadFromFiles(const std::vector<std:
GLuint subTextureID = createImage(req, data);
auto width = header.size.width;
auto height = header.size.height;
for (uint level = 0; level < header.mipmapLevels; level++) {
for (uint32_t level = 0; level < header.mipmapLevels; level++) {
glCopyImageSubData(subTextureID, GL_TEXTURE_2D, level, 0, 0, 0, textureID, GL_TEXTURE_2D_ARRAY, level, 0, 0, layer, width, height, 1);
width /= 2;
height /= 2;

View File

@ -297,9 +297,9 @@ bool Renderer::Load(Window& window, const renderOptions& opt, const windowOption
std::vector<VkPhysicalDevice> devices(deviceCount);
vkEnumeratePhysicalDevices(instance, &deviceCount, devices.data());
uint bestScore = 0;
uint32_t bestScore = 0;
for(const auto& device: devices) {
uint score = 1;
uint32_t score = 1;
auto infos = PhysicalDeviceInfo(window.getPtr(), device, surface, windOpt.getSamples(), windOpt.targetFPS < Window::MIN_FPS);
{

View File

@ -47,8 +47,8 @@ namespace world::client {
chunk_pos last_chunk = chunk_pos(INT_MAX);
ushort loadDistance;
ushort keepDistance;
ushort serverDistance;
uint16_t loadDistance;
uint16_t keepDistance;
uint16_t serverDistance;
};
}

View File

@ -2,7 +2,7 @@
#include "math.hpp"
using namespace glm;
ifvec3::ifvec3(const llvec3 &pos, uint density) {
ifvec3::ifvec3(const llvec3 &pos, uint32_t density) {
const auto d = IDX_LENGTH2 * density;
raw = glm::divide(pos, glm::uvec3(d));
offset = glm::vec3(rem(pos.x, d), rem(pos.y, d), rem(pos.z, d));
@ -22,6 +22,6 @@ void ifvec3::center() {
double ifvec3::dist(const ifvec3& p) const {
return glm::length(glm::dvec3(raw - p.raw)) + glm::length(offset - p.offset);
}
ifvec3 ifvec3::divide(uint m) const {
ifvec3 ifvec3::divide(uint32_t m) const {
return ifvec3(glm::divide(raw, glm::ucvec3(m)), glm::divide(offset, glm::uvec3(m)), false);
}

View File

@ -1,11 +1,12 @@
#pragma once
#include <glm/glm.hpp>
#include <sys/types.h>
namespace glm {
typedef vec<3, long long> llvec3;
typedef vec<3, long> lvec3;
typedef vec<3, ushort> usvec3;
typedef vec<3, glm::u16> usvec3;
typedef vec<3, unsigned char> ucvec3;
const auto IDX_LENGTH = 32;
@ -23,7 +24,7 @@ namespace glm {
ifvec3(const raw_t &raw, const offset_t &offset, bool recenter = true) : raw(raw), offset(offset) {
if(recenter) center();
}
ifvec3(const glm::llvec3 &pos, uint density = 1);
ifvec3(const glm::llvec3 &pos, uint32_t density = 1);
raw_t raw;
offset_t offset;
@ -33,7 +34,7 @@ namespace glm {
glm::llvec3 raw_as_long() const;
double dist(const ifvec3 &p) const;
ifvec3 divide(uint m = IDX_LENGTH) const;
ifvec3 divide(uint32_t m = IDX_LENGTH) const;
inline const ifvec3 &operator+=(const offset_t &v) {
offset += v;

View File

@ -26,13 +26,13 @@ namespace glm {
return glm::abs(glm::abs(a) - glm::abs(b));
}
constexpr uint inline rem(long long value, uint m) {
constexpr uint32_t inline rem(long long value, uint32_t m) {
return value < 0 ? ((value+1) % (long long)m) + m - 1 : value % (long long)m;
}
constexpr long inline div(long long value, uint m) {
constexpr long inline div(long long value, uint32_t m) {
return value < 0 ? ((value+1) / (long long)m) - 1 : value / (long long)m;
}
constexpr float inline div(float value, uint m) {
constexpr float inline div(float value, uint32_t m) {
return value < 0 ? ((value+1) / m) - 1 : value / m;
}
constexpr ucvec3 inline modulo(const llvec3& value, const ucvec3& m = ucvec3(IDX_LENGTH)) {

View File

@ -40,7 +40,7 @@ enum class server_packet_type: enet_uint8 {
/// {area_<chunk_pos>, zstd<chunk rle>} reliable
CHUNK = 17,
/// Chunk changes
/// {area_id, {chunk_pos, ushort(count), Chunk::Edit[]}[]} notify
/// {area_id, {chunk_pos, uint16_t(count), Chunk::Edit[]}[]} notify
/// FIXME: too big !!! MAYBE: compress
EDITS = 18,
@ -57,7 +57,7 @@ enum class server_packet_type: enet_uint8 {
/// zstd dict reliable
COMPRESSION = 24,
/// Server capabilities
/// ushort(loadDistance), MAYBE: more reliable
/// uint16_t(loadDistance), MAYBE: more reliable
CAPABILITIES = 25,
/// Public chat message

View File

@ -8,9 +8,9 @@ using namespace world;
Chunk::Chunk(std::istream& str, bool rle) {
if(rle) {
ushort i = 0;
uint16_t i = 0;
while(!str.eof()) {
ushort count;
uint16_t count;
Voxel voxel;
str.read(reinterpret_cast<char *>(&count), sizeof(count));
str.read(reinterpret_cast<char *>(&voxel), sizeof(voxel));

View File

@ -30,7 +30,7 @@ std::optional<Faces> EdittableChunk::update(float deltaTime, bool animate) {
}
}
void EdittableChunk::invalidate(ushort idx) {
void EdittableChunk::invalidate(uint16_t idx) {
invalidate(
((!getNeighborIdx(idx, Face::Up).has_value()) & Faces::Up) |
((!getNeighborIdx(idx, Face::Down).has_value()) & Faces::Down) |

View File

@ -15,9 +15,9 @@ namespace world {
/// Distance management
struct options {
/// Radius in chunks to load if missing
ushort loadDistance = 5;
uint16_t loadDistance = 5;
/// Radius in chunks to keep in memory
ushort keepDistance = 6;
uint16_t keepDistance = 6;
};
/// Universe voxel ray intersection

View File

@ -28,7 +28,7 @@ namespace world {
return (value & 0b0111'1111'1111'1000) >> 3;
}
/// Texture idx
constexpr inline ushort texture() const {
constexpr inline uint16_t texture() const {
return materials::textures_map[material()];
}

View File

@ -11,7 +11,7 @@ namespace world::materials {
std::string texture;
float roughness;
bool solid;
//ushort break_to
//uint16_t break_to
};
//MAYBE: index name enum
@ -28,7 +28,7 @@ namespace world::materials {
/// Materials names
static const std::array<std::string, count> names = {{"Air", "Dirt", "Grass", "Sand", "Rock", "Wall", "Path", "Alien metal", "Water"}};
/// Materials textures
static const std::array<ushort, count> textures_map = {{0, 2, 9, 1, 7, 6, 3, 8, 12}};
static const std::array<uint16_t, count> textures_map = {{0, 2, 9, 1, 7, 6, 3, 8, 12}};
/// Materials roughness.
/// -1: slope, 0: normal, 1: cube
static const std::array<float, count> roughness = {{0, 0, 0, 0, 0, 0, -1, .8, 0}};

View File

@ -16,7 +16,7 @@ Chunk::~Chunk() { }
void Chunk::write(std::ostream& str, bool rle) const {
if (rle) {
const auto *it = voxels.begin();
ushort counter = 1;
uint16_t counter = 1;
Voxel current = *it;
while(true) {
++it;
@ -40,7 +40,7 @@ void Chunk::write(std::ostream& str, bool rle) const {
}
}
void Chunk::set(ushort idx, const Voxel& val) {
void Chunk::set(uint16_t idx, const Voxel& val) {
modified = modified || (voxels[idx].value != val.value);
voxels[idx] = val;
}

View File

@ -63,7 +63,7 @@ Universe::Universe(const Universe::options &options): host(options.connection, o
}
// Workers
for (size_t i = 0; i < std::max<uint>(1, std::thread::hardware_concurrency() / 2 - 1); i++) {
for (size_t i = 0; i < std::max<uint32_t>(1, std::thread::hardware_concurrency() / 2 - 1); i++) {
workers.emplace_back([&] {
#if TRACY_ENABLE
tracy::SetThreadName("Chunks");

View File

@ -103,8 +103,8 @@ namespace world::server {
using save_task_t = std::pair<area_it_t, robin_hood::pair<chunk_pos, std::shared_ptr<world::server::Chunk>>>;
data::safe_queue<save_task_t> saveQueue; //NOTE: consider Area and Chunk const
ushort loadDistance;
ushort keepDistance;
uint16_t loadDistance;
uint16_t keepDistance;
std::string folderPath;
net::Server host;

View File

@ -24,7 +24,7 @@ void FileRegion::load() {
return;
}
// Read header
ushort chunkCount; //NOTE: pretty useless
uint16_t chunkCount; //NOTE: pretty useless
file.read(reinterpret_cast<char *>(&chunkCount), sizeof(chunkCount));
while (!file.eof()) {
@ -35,12 +35,12 @@ void FileRegion::load() {
file.read(reinterpret_cast<char *>(&pos.z), sizeof(region_chunk_pos::value_type));
//NOTE: align uchar pos
if constexpr (sizeof(region_chunk_pos) % sizeof(ushort) != 0) {
if constexpr (sizeof(region_chunk_pos) % sizeof(uint16_t) != 0) {
file.ignore(1);
}
// Read size
ushort size = 0;
uint16_t size = 0;
file.read(reinterpret_cast<char *>(&size), sizeof(size));
// Ignore content
@ -104,7 +104,7 @@ void FileRegion::save(std::optional<std::pair<region_chunk_pos, std::unique_ptr<
}
{ // Write header
ushort size = index.size() + (added.has_value() ? 1 : 0);
uint16_t size = index.size() + (added.has_value() ? 1 : 0);
tmpFile.write(reinterpret_cast<char *>(&size), sizeof(size));
}
@ -118,7 +118,7 @@ void FileRegion::save(std::optional<std::pair<region_chunk_pos, std::unique_ptr<
}
//NOTE: align uchar pos
if constexpr (sizeof(region_chunk_pos) % sizeof(ushort) != 0) {
if constexpr (sizeof(region_chunk_pos) % sizeof(uint16_t) != 0) {
tmpFile.put(0);
//MAYBE: store useful uchar flags
}
@ -142,7 +142,7 @@ void FileRegion::save(std::optional<std::pair<region_chunk_pos, std::unique_ptr<
}
//NOTE: align uchar pos
if constexpr (sizeof(region_chunk_pos) % sizeof(ushort) != 0) {
if constexpr (sizeof(region_chunk_pos) % sizeof(uint16_t) != 0) {
tmpFile.put(0);
//MAYBE: store useful uchar flags
}

View File

@ -27,7 +27,7 @@ namespace world::server {
std::shared_mutex mutex;
std::ifstream file;
robin_hood::unordered_map<region_chunk_pos, std::pair<ushort, std::streampos>> index;
robin_hood::unordered_map<region_chunk_pos, std::pair<uint16_t, std::streampos>> index;
void load();
};

View File

@ -33,7 +33,7 @@ void MemoryRegion::load() {
}
// Read header
ushort chunkCount; //NOTE: pretty useless
uint16_t chunkCount; //NOTE: pretty useless
file.read(reinterpret_cast<char *>(&chunkCount), sizeof(chunkCount));
while (!file.eof()) {
@ -44,12 +44,12 @@ void MemoryRegion::load() {
file.read(reinterpret_cast<char *>(&pos.z), sizeof(region_chunk_pos::value_type));
//NOTE: align uchar pos
if constexpr (sizeof(region_chunk_pos) % sizeof(ushort) != 0) {
if constexpr (sizeof(region_chunk_pos) % sizeof(uint16_t) != 0) {
file.ignore(1);
}
// Read size
ushort size = 0;
uint16_t size = 0;
file.read(reinterpret_cast<char *>(&size), sizeof(size));
// Read content
@ -129,13 +129,13 @@ void MemoryRegion::save(bool force) {
}
{ // Write header
ushort size = (ushort)content.size();
uint16_t size = (uint16_t)content.size();
file.write(reinterpret_cast<char *>(&size), sizeof(size));
}
for(const auto& chunk: content) {
assert(chunk.second->size() < USHRT_MAX);
auto size = (ushort)chunk.second->size();
auto size = (uint16_t)chunk.second->size();
const auto out = chunk.second->data();
{ // Write pos
@ -146,7 +146,7 @@ void MemoryRegion::save(bool force) {
}
//NOTE: align uchar pos
if constexpr (sizeof(region_chunk_pos) % sizeof(ushort) != 0) {
if constexpr (sizeof(region_chunk_pos) % sizeof(uint16_t) != 0) {
file.put(0);
//MAYBE: store useful uchar flags
}