Compare commits

2 Commits

Author SHA1 Message Date
May B. 3ea83d40ac Memory budget and validation logs 2020-10-01 14:35:21 +02:00
May B. d5f1bba4d8 Better allocator and logging 2020-09-30 23:08:17 +02:00
13 changed files with 317 additions and 115 deletions

View File

@ -5,6 +5,8 @@ option(PROFILING "Build with profiling" 0)
option(FIXED_WINDOW "Lock window size: Force floating on i3" 0)
set(SIMD_LEVEL "avx2" CACHE STRING "SIMD processor acceleration (sse2, sse4.1, avx2, avx512f)")
option(USE_FMA "Use fma" 1)
option(LOG_DEBUG "Show debug logs" 0)
option(LOG_TRACE "Show trace logs" 0)
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
@ -23,7 +25,7 @@ add_subdirectory("include/glm")
add_subdirectory("include/enet")
add_subdirectory("include/zstd")
add_compile_definitions(FIXED_WINDOW=${FIXED_WINDOW} HN_USE_FILESYSTEM=1)
add_compile_definitions(FIXED_WINDOW=${FIXED_WINDOW} LOG_DEBUG=${LOG_DEBUG} LOG_TRACE=${LOG_TRACE} HN_USE_FILESYSTEM=1)
if(PROFILING)
add_compile_definitions(TRACY_ENABLE=1)
endif(PROFILING)
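The two new options are forwarded to the compiler through add_compile_definitions, so LOG_DEBUG and LOG_TRACE end up as preprocessor constants that the logging header can test at compile time. A minimal C++ sketch of that flow (the real macro bodies are in the logger header changed further down):

// Sketch only: LOG_TRACE is injected by add_compile_definitions(LOG_TRACE=${LOG_TRACE}).
#include <iostream>

#if LOG_TRACE
  #define LOG_T(expr) (std::cout << expr << '\n')
#else
  #define LOG_T(expr) ((void)0)   // trace logging compiles away when LOG_TRACE=0
#endif

int main() {
    LOG_T("Trying OpenGL");       // emitted only when configured with -DLOG_TRACE=1
}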

View File

@ -7,12 +7,12 @@ namespace render {
bool Load(Window& window, bool preferVulkan, const renderOptions& options) {
if(!preferVulkan) {
LOG_D("Trying OpenGL");
LOG_T("Trying OpenGL");
if(gl::Renderer::Load(window, options))
return true;
window.destroy();
}
LOG_D("Trying Vulkan");
LOG_T("Trying Vulkan");
if(vk::Renderer::Load(window, options))
return true;
window.destroy();

View File

@ -7,12 +7,21 @@
using namespace render::vk;
constexpr auto HOST_EASILY_WRITABLE = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
constexpr VkDeviceSize MIN_ALLOC_SIZE = 1 << 28;
const auto NO_DELETER = Allocator::MemoryDeleter(nullptr);
Allocator::memory_ptr Allocator::GetNull() { return Allocator::memory_ptr(nullptr, NO_DELETER); }
Allocator::Allocator(VkDevice device, const PhysicalDeviceInfo &info) : device(device) {
vkGetPhysicalDeviceMemoryProperties(info.device, &properties);
Allocator::Allocator(VkDevice device, const PhysicalDeviceInfo &info): physicalDevice(info.device), device(device) {
if(info.hasMemoryBudget()) {
properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2;
properties2.pNext = &budget;
budget.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT;
} else {
LOG_W("No memory budget. Process may go out of memory.");
}
updateProperties();
{
if (!info.queueIndices.transferFamily.has_value()) {
LOG_W("No transfer queue family. Using graphics one");
@ -54,11 +63,11 @@ void Allocator::setTracyZone(const char* name) {
(void)name;
}
Allocator::memory_ptr Allocator::createBuffer(VkDeviceSize size, VkMemoryPropertyFlags properties, VkBufferUsageFlags usage, buffer_info& out) {
Allocator::memory_ptr Allocator::createBuffer(const buffer_requirement& requirement, VkMemoryPropertyFlags properties, buffer_info& out) {
VkBufferCreateInfo bufferInfo{};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.size = size;
bufferInfo.usage = usage;
bufferInfo.size = requirement.size;
bufferInfo.usage = requirement.usage;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(device, &bufferInfo, ALLOC, &out.buffer) != VK_SUCCESS) {
@ -70,12 +79,28 @@ Allocator::memory_ptr Allocator::createBuffer(VkDeviceSize size, VkMemoryPropert
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(device, out.buffer, &memRequirements);
if (auto memory = allocate(memRequirements, properties)) {
if(vkBindBufferMemory(device, out.buffer, memory->ref, memory->offset) == VK_SUCCESS)
return memory;
auto memory = allocate(memRequirements, properties);
if (!memory || vkBindBufferMemory(device, out.buffer, memory->ref, memory->offset) != VK_SUCCESS) {
LOG_E("Failed to allocate buffer memory");
return GetNull();
}
LOG_E("Failed to allocate buffer memory");
return GetNull();
if (requirement.size != 0 && requirement.data != nullptr) {
if (memory->ptr != nullptr) {
memory->write(requirement.data, requirement.data_size, requirement.data_offset);
} else {
Allocator::buffer_info stagingBuffer;
if(auto stagingMemory = createBuffer({requirement.size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT}, HOST_EASILY_WRITABLE, stagingBuffer)) {
stagingMemory->write(requirement.data, requirement.data_size, requirement.data_offset);
copyBuffer(stagingBuffer, out, requirement.size);
vkDestroyBuffer(device, stagingBuffer.buffer, ALLOC); //TODO: move to buffer destructor
} else {
FATAL("Cannot allocate staging memory");
return GetNull();
}
}
}
return memory;
}
Allocator::memory_ptr Allocator::createBuffers(const std::vector<buffer_requirement>& requirements, VkMemoryPropertyFlags properties, std::vector<buffer_info>& out) {
assert(!requirements.empty());
@ -118,31 +143,110 @@ Allocator::memory_ptr Allocator::createBuffers(const std::vector<buffer_requirem
out.pop_back();
// Bind memory
if (auto memory = allocate(memRequirements, properties)) {
for (size_t i = 0; i < requirements.size(); i++) {
if (vkBindBufferMemory(device, out[i].buffer, memory->ref, memory->offset + out[i].offset) != VK_SUCCESS) {
LOG_E("Failed to bind buffer");
auto memory = allocate(memRequirements, properties);
if (!memory) {
LOG_E("Failed to allocate buffers");
return GetNull();
}
for (size_t i = 0; i < requirements.size(); i++) {
if (vkBindBufferMemory(device, out[i].buffer, memory->ref, memory->offset + out[i].offset) != VK_SUCCESS) {
LOG_E("Failed to bind buffer");
return GetNull();
}
}
VkDeviceSize stagingSize = 0;
for (auto& requirement: requirements)
if (requirement.data != nullptr)
stagingSize = std::max(stagingSize, requirement.size);
// Copy data
if (stagingSize != 0) {
if (memory->ptr != nullptr) {
for (size_t i = 0; i < requirements.size(); i++) {
if (requirements[i].data != nullptr && requirements[i].size != 0) {
assert(requirements[i].data_size + requirements[i].data_offset <= requirements[i].size);
memory->write(requirements[i].data, requirements[i].data_size, out[i].offset + requirements[i].data_offset);
}
}
} else {
Allocator::buffer_info stagingBuffer;
if(auto stagingMemory = createBuffer({stagingSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT}, HOST_EASILY_WRITABLE, stagingBuffer)) {
for (size_t i = 0; i < requirements.size(); i++) {
if (requirements[i].data != nullptr && requirements[i].size != 0) {
assert(requirements[i].data_size + requirements[i].data_offset <= requirements[i].size);
stagingMemory->write(requirements[i].data, requirements[i].data_size, requirements[i].data_offset);
copyBuffer(stagingBuffer, out[i], requirements[i].size);
}
}
vkDestroyBuffer(device, stagingBuffer.buffer, ALLOC); //TODO: move to buffer destructor
} else {
FATAL("Cannot allocate staging memory");
return GetNull();
}
}
return memory;
}
LOG_E("Failed to allocate buffers");
return GetNull();
return memory;
}
void Allocator::updateProperties() {
if (hasBudget()) {
vkGetPhysicalDeviceMemoryProperties2(physicalDevice, &properties2);
#if LOG_TRACE
LOG_T("Available heaps:")
for (size_t i = 0; i < getProperties().memoryHeapCount; i++) {
LOG_T('\t' << i << ": " << budget.heapUsage[i] << '/' << budget.heapBudget[i]);
}
#endif
} else {
vkGetPhysicalDeviceMemoryProperties(physicalDevice, &properties);
}
}
Allocator::memory_ptr Allocator::allocate(VkMemoryRequirements requirements, VkMemoryPropertyFlags properties) {
//TODO: search for existing allocation
//TODO: allocate more ???
// Search in existing allocations
for (auto& alloc: allocations) {
if ((requirements.memoryTypeBits & (1 << alloc->memoryType)) &&
(getProperties().memoryTypes[alloc->memoryType].propertyFlags & properties) == properties &&
alloc->size > requirements.size
) {
VkDeviceSize start = 0;
auto aligned = [&](VkDeviceSize offset) {
if (offset % requirements.alignment == 0)
return offset;
return offset + requirements.alignment - (offset % requirements.alignment);
};
auto it = alloc->areas.cbegin();
auto done = [&] {
alloc->areas.insert(it, {requirements.size, start});
return memory_ptr(new memory_area{alloc->memory, requirements.size, start, alloc->ptr != nullptr ? alloc->ptr + start : nullptr}, alloc->deleter);
};
while (it != alloc->areas.cend()) {
if (it->offset - start > requirements.size) {
return done();
}
start = aligned(it->offset + it->size);
++it;
}
if (alloc->size - start > requirements.size) {
return done();
}
}
}
LOG_T("Need to allocate more");
VkMemoryAllocateInfo allocInfo{};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = requirements.size;
if (const auto memIdx = findMemory(requirements.memoryTypeBits, properties, requirements.size)) {
//TODO: check budget
allocInfo.allocationSize = std::max(MIN_ALLOC_SIZE, requirements.size);
if (const auto memIdx = findMemory(requirements.memoryTypeBits, properties, allocInfo.allocationSize)) {
allocInfo.memoryTypeIndex = memIdx.value();
} else if (const auto memIdx = findMemory(requirements.memoryTypeBits, properties, requirements.size)) {
LOG_W("Memory heavily limited cannot allocate full page");
allocInfo.allocationSize = requirements.size;
allocInfo.memoryTypeIndex = memIdx.value();
} else {
LOG_E("No suitable memory heap");
LOG_E("No suitable memory heap within memory budget");
LOG_D(requirements.memoryTypeBits << ' ' << properties << ' ' << requirements.size);
return GetNull();
}
@ -153,12 +257,12 @@ Allocator::memory_ptr Allocator::allocate(VkMemoryRequirements requirements, VkM
}
void *ptr = nullptr;
if (properties & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
if ((getProperties().memoryTypes[allocInfo.memoryTypeIndex].propertyFlags & HOST_EASILY_WRITABLE) == HOST_EASILY_WRITABLE) {
vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &ptr);
}
auto allocation = allocations.emplace_back(new Allocation(device, memory, allocInfo.allocationSize, allocInfo.memoryTypeIndex, ptr)).get();
allocation->areas.push_back({allocInfo.allocationSize, 0});
allocation->areas.push_back({requirements.size, 0});
return memory_ptr(new memory_area{memory, requirements.size, 0, ptr}, allocation->deleter);
}
@ -190,30 +294,35 @@ void Allocator::copyBuffer(buffer_info src, buffer_info dst, VkDeviceSize size)
vkResetCommandBuffer(transferBuffer, 0);
}
std::optional<uint32_t> Allocator::findMemory(uint32_t typeFilter, VkMemoryPropertyFlags requirement, VkDeviceSize size) const {
#if DEBUG
LOG_D("available memory:");
for (uint32_t i = 0; i < properties.memoryTypeCount; i++) {
LOG_D('\t' << i << ": " << ((properties.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) ? "local " : "")
<< ((properties.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) ? "visible " : "")
<< ((properties.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) ? "coherent " : "")
<< ((properties.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) ? "cached " : "")
<< ((properties.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) ? "lazy " : "")
<< ((properties.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT) ? "protected " : "")
<< properties.memoryHeaps[properties.memoryTypes[i].heapIndex].size);
std::optional<uint32_t> Allocator::findMemory(uint32_t typeFilter, VkMemoryPropertyFlags requirement, VkDeviceSize size) {
updateProperties();
#if LOG_TRACE
LOG_T("Available memory:");
for (uint32_t i = 0; i < getProperties().memoryTypeCount; i++) {
LOG_T('\t' << i << ": "
<< getProperties().memoryTypes[i].heapIndex << ' '
<< ((getProperties().memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) ? "local " : "")
<< ((getProperties().memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) ? "visible " : "")
<< ((getProperties().memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) ? "coherent " : "")
<< ((getProperties().memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) ? "cached " : "")
<< ((getProperties().memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) ? "lazy " : "")
<< ((getProperties().memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT) ? "protected " : "")
<< getProperties().memoryHeaps[getProperties().memoryTypes[i].heapIndex].size);
}
#endif
for (uint32_t i = 0; i < properties.memoryTypeCount; i++) {
if ((typeFilter & (1 << i)) && (properties.memoryTypes[i].propertyFlags & requirement) == requirement) {
for (uint32_t i = 0; i < getProperties().memoryTypeCount; i++) {
if ((typeFilter & (1 << i)) && (getProperties().memoryTypes[i].propertyFlags & requirement) == requirement) {
VkDeviceSize usage = size;
for(const auto& alloc: allocations) {
if(alloc->memoryType == i)
usage += alloc->size;
}
VkDeviceSize budget = properties.memoryHeaps[properties.memoryTypes[i].heapIndex].size;
//TODO: use memory budget extension
if(budget >= usage) {
const auto heapIndex = getProperties().memoryTypes[i].heapIndex;
const VkDeviceSize heapSize = getProperties().memoryHeaps[heapIndex].size;
if (heapSize >= usage && (!hasBudget() || budget.heapBudget[heapIndex] >= budget.heapUsage[heapIndex] + size)) {
return i;
} else {
LOG_T("Out of budget " << usage << '/' << heapSize << " : " << budget.heapUsage[heapIndex] + size << '/' << budget.heapBudget[heapIndex]);
}
}
}
@ -232,6 +341,7 @@ void Allocator::MemoryDeleter::operator()(memory_area* area) {
if(it->offset == area->offset) {
assert(it->size == area->size);
owner->areas.erase(it);
//MAYBE: remove if empty
delete area;
return;
}
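The reworked allocate() now reuses existing VkDeviceMemory blocks before asking the driver for a new one: it walks the block's area list in offset order and takes the first gap large enough for the request, aligning each candidate offset to the buffer's alignment requirement. A standalone sketch of that first-fit scan, with simplified types and hypothetical names (the real code also carries the mapped pointer and the deleter):

#include <cstdint>
#include <optional>
#include <vector>

struct Area { uint64_t size, offset; };   // an occupied range inside one allocation

// Round an offset up to the next multiple of alignment.
static uint64_t alignUp(uint64_t offset, uint64_t alignment) {
    return offset % alignment == 0 ? offset : offset + alignment - offset % alignment;
}

// First-fit: find an offset for `size` bytes in a block of `blockSize` bytes whose
// occupied areas are sorted by offset; std::nullopt means the block has no room.
std::optional<uint64_t> findGap(const std::vector<Area>& areas, uint64_t blockSize,
                                uint64_t size, uint64_t alignment) {
    uint64_t start = 0;
    for (const auto& area : areas) {
        if (area.offset >= start && area.offset - start >= size)
            return start;                         // gap before this area is big enough
        start = alignUp(area.offset + area.size, alignment);
    }
    if (blockSize >= start && blockSize - start >= size)
        return start;                             // tail gap after the last area
    return std::nullopt;
}

The diff keeps each area list sorted by inserting the new area right before the iterator that closed the gap, which is what makes this linear scan valid.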

View File

@ -45,14 +45,15 @@ public:
VkBuffer buffer = nullptr;
VkDeviceSize offset = 0;
};
memory_ptr createBuffer(VkDeviceSize, VkMemoryPropertyFlags, VkBufferUsageFlags, buffer_info&);
struct buffer_requirement {
VkDeviceSize size;
VkBufferUsageFlags usage;
const void *data = nullptr;
VkDeviceSize data_size = 0;
VkDeviceSize data_offset = 0;
};
memory_ptr createBuffer(const buffer_requirement&, VkMemoryPropertyFlags, buffer_info&);
memory_ptr createBuffers(const std::vector<buffer_requirement> &, VkMemoryPropertyFlags, std::vector<buffer_info> &);
//TODO: create Buffer{MemoryArea + VkBuffer}
//TODO: create readonly buffer with data
void copyBuffer(buffer_info srcBuffer, buffer_info dstBuffer, VkDeviceSize size);
@ -61,7 +62,10 @@ public:
static memory_ptr GetNull();
private:
std::optional<uint32_t> findMemory(uint32_t, VkMemoryPropertyFlags, VkDeviceSize size = 0) const;
std::optional<uint32_t> findMemory(uint32_t, VkMemoryPropertyFlags, VkDeviceSize size = 0);
constexpr bool hasBudget() const { return properties2.pNext != nullptr; }
constexpr const VkPhysicalDeviceMemoryProperties &getProperties() const { return hasBudget() ? properties2.memoryProperties : properties; }
void updateProperties();
struct Allocation {
Allocation(VkDevice, VkDeviceMemory, VkDeviceSize, uint32_t, void *ptr);
@ -71,15 +75,18 @@ private:
const VkDeviceMemory memory;
const VkDeviceSize size;
const uint32_t memoryType;
const void *ptr = nullptr;
void *const ptr = nullptr;
const MemoryDeleter deleter;
struct area { VkDeviceSize size; VkDeviceSize offset; };
std::vector<area> areas;
};
VkDevice device;
VkPhysicalDeviceMemoryProperties properties;
VkPhysicalDevice const physicalDevice;
VkDevice const device;
VkPhysicalDeviceMemoryProperties properties{};
VkPhysicalDeviceMemoryProperties2 properties2{};
VkPhysicalDeviceMemoryBudgetPropertiesEXT budget{};
VkQueue transferQueue;
VkCommandPool transferPool;
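With the new buffer_requirement, callers can hand initial data straight to the allocator, which either writes it through a mapped pointer or routes it through a hidden staging copy for device-local memory. A hedged usage sketch (alloc, the FATAL macro, and the float payload are assumptions; the flags mirror the vertex-buffer call further down):

// Sketch: device-local vertex buffer created and filled in one call.
const std::vector<float> vertices = {0.f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f};
const VkDeviceSize size = sizeof(float) * vertices.size();

Allocator::buffer_info vertexBuffer;
auto memory = alloc.createBuffer(
    {size,                                            // buffer_requirement::size
     VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
     vertices.data(), size, 0},                       // data, data_size, data_offset
    VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, vertexBuffer);
if (!memory) {
    FATAL("Cannot create vertex buffer");
}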

View File

@ -24,27 +24,14 @@ device(device), indexedBufferMemory(Allocator::GetNull()), uniformBuffersMemory(
{ // Vertex buffers (const)
size_t vertexSize = sizeof(buffer::vk::vertices[0]) * buffer::vk::vertices.size();
size_t indexSize = sizeof(buffer::vk::indices[0]) * buffer::vk::indices.size();
size_t stagingSize = std::max(vertexSize, indexSize);
Allocator::buffer_info stagingBuffer;
if(auto stagingMemory = alloc.createBuffer(stagingSize, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, stagingBuffer)) {
if(std::vector<Allocator::buffer_info> out; indexedBufferMemory = alloc.createBuffers({
{indexSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT},
{vertexSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT}
}, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, out)) {
indexBuffer = out[0];
vertexBuffer = out[1];
} else {
FATAL("Cannot allocate buffer memory");
}
stagingMemory->write(buffer::vk::vertices.data(), vertexSize);
alloc.copyBuffer(stagingBuffer, vertexBuffer, vertexSize);
stagingMemory->write(buffer::vk::indices.data(), indexSize);
alloc.copyBuffer(stagingBuffer, indexBuffer, indexSize);
vkDestroyBuffer(device, stagingBuffer.buffer, ALLOC); //TODO: move to buffer
if(std::vector<Allocator::buffer_info> out; indexedBufferMemory = alloc.createBuffers({
{indexSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT, buffer::vk::indices.data(), indexSize, 0},
{vertexSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, buffer::vk::vertices.data(), vertexSize, 0}
}, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, out)) {
indexBuffer = out[0];
vertexBuffer = out[1];
} else {
FATAL("Cannot allocate staging memory");
FATAL("Cannot create vertex buffer");
}
}

View File

@ -42,8 +42,8 @@ QueueFamilyIndices QueueFamilyIndices::Query(VkPhysicalDevice device, VkSurfaceK
queueIndices.presentFamily = i;
}
#if DEBUG
LOG_D("Queue " << i << ' ' << (queueFamily.queueFlags & VK_QUEUE_GRAPHICS_BIT ? "graphics " : "")
#if LOG_TRACE
LOG_T("Queue " << i << ' ' << (queueFamily.queueFlags & VK_QUEUE_GRAPHICS_BIT ? "graphics " : "")
<< (queueFamily.queueFlags & VK_QUEUE_COMPUTE_BIT ? "compute " : "")
<< (presentSupport ? "present " : "")
<< (queueFamily.queueFlags & VK_QUEUE_TRANSFER_BIT ? "transfer " : "")
@ -65,4 +65,12 @@ VkSurfaceFormatKHR PhysicalDeviceInfo::getFormat() const {
LOG_W("Using suboptimal surface format");
return swapDetails.formats[0];
}
#include <string.h>
bool PhysicalDeviceInfo::hasMemoryBudget() const {
for (auto extension: optionalExtensions) {
if (strcmp(extension, VK_EXT_MEMORY_BUDGET_EXTENSION_NAME) == 0)
return true;
}
return false;
}
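hasMemoryBudget() only records that VK_EXT_memory_budget was picked up as an optional device extension; the per-heap numbers come from chaining VkPhysicalDeviceMemoryBudgetPropertiesEXT into vkGetPhysicalDeviceMemoryProperties2, as Allocator::updateProperties() does. A minimal standalone query, assuming the extension is enabled on the device:

#include <vulkan/vulkan.h>

// Sketch: read current usage/budget per heap (requires VK_EXT_memory_budget).
void logHeapBudgets(VkPhysicalDevice physicalDevice) {
    VkPhysicalDeviceMemoryBudgetPropertiesEXT budget{};
    budget.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT;

    VkPhysicalDeviceMemoryProperties2 properties2{};
    properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2;
    properties2.pNext = &budget;

    vkGetPhysicalDeviceMemoryProperties2(physicalDevice, &properties2);
    for (uint32_t i = 0; i < properties2.memoryProperties.memoryHeapCount; i++) {
        LOG_T("heap " << i << ": " << budget.heapUsage[i] << '/' << budget.heapBudget[i]);
    }
}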

View File

@ -29,11 +29,13 @@ struct PhysicalDeviceInfo {
swapDetails(SwapChainSupportDetails::Query(device, surface)), queueIndices(QueueFamilyIndices::Query(device, surface)) { }
VkSurfaceFormatKHR getFormat() const;
bool hasMemoryBudget() const;
GLFWwindow *window;
VkPhysicalDevice device = VK_NULL_HANDLE;
VkSurfaceKHR surface;
SwapChainSupportDetails swapDetails;
QueueFamilyIndices queueIndices;
std::vector<const char *> optionalExtensions;
};
}

View File

@ -15,13 +15,35 @@
using namespace render::vk;
constexpr auto LOAD_DEVICE = true;
#if LOG_DEBUG
constexpr auto VALIDATION_LAYER = true;
#else
constexpr auto VALIDATION_LAYER = false;
#endif
void set_current_extent(VkSurfaceCapabilitiesKHR &capabilities, GLFWwindow *ptr);
VKAPI_ATTR VkBool32 VKAPI_CALL debugValidationCallback(
VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
VkDebugUtilsMessageTypeFlagsEXT messageType,
const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData,
void *pUserData);
Renderer::Renderer(VkInstance instance, VkDevice device, const PhysicalDeviceInfo& info, const renderOptions& opt):
options(opt), instance(instance), surface(info.surface), device(device),
physicalInfo(std::make_unique<PhysicalDeviceInfo>(info)) {
if constexpr(VALIDATION_LAYER) {
VkDebugUtilsMessengerCreateInfoEXT createInfo{};
createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
createInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
createInfo.pfnUserCallback = debugValidationCallback;
createInfo.pUserData = nullptr;
if (vkCreateDebugUtilsMessengerEXT(instance, &createInfo, ALLOC, &debugMessenger) != VK_SUCCESS) {
LOG_E("Failed to redirect validation errors");
}
}
set_current_extent(physicalInfo->swapDetails.capabilities, physicalInfo->window);
allocator = std::make_unique<Allocator>(device, *physicalInfo.get());
@ -67,6 +89,9 @@ Renderer::~Renderer() {
vkDestroyDevice(device, ALLOC);
vkDestroySurfaceKHR(instance, surface, ALLOC);
if constexpr(VALIDATION_LAYER) {
vkDestroyDebugUtilsMessengerEXT(instance, debugMessenger, ALLOC);
}
vkDestroyInstance(instance, ALLOC);
}
@ -99,6 +124,37 @@ void set_current_extent(VkSurfaceCapabilitiesKHR &capabilities, GLFWwindow* ptr)
std::max(capabilities.minImageExtent.width, std::min<uint32_t>(capabilities.maxImageExtent.width, windowSize.first)),
std::max(capabilities.minImageExtent.height, std::min<uint32_t>(capabilities.maxImageExtent.height, windowSize.second))};
};
VKAPI_ATTR VkBool32 VKAPI_CALL debugValidationCallback(
VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
VkDebugUtilsMessageTypeFlagsEXT messageType,
const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
void* pUserData)
{
switch (messageSeverity) {
case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT:
LOG_E("[VK] " << pCallbackData->pMessage);
break;
case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT:
LOG_W("[VK] " << pCallbackData->pMessage);
break;
default:
LOG_I("[VK] " << pCallbackData->pMessage);
break;
case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT:
LOG_D("[VK] " << pCallbackData->pMessage);
break;
case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT:
LOG_T("[VK] " << pCallbackData->pMessage);
break;
}
return VK_FALSE;
}
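The constructor above calls vkCreateDebugUtilsMessengerEXT (and the destructor vkDestroyDebugUtilsMessengerEXT) directly; these are VK_EXT_debug_utils entry points, which are only available if something resolves them, either a meta-loader or an explicit vkGetInstanceProcAddr lookup. A hedged sketch of the explicit lookup, reusing the createInfo built above:

// Sketch: resolve the extension function per instance instead of linking it.
auto createMessenger = reinterpret_cast<PFN_vkCreateDebugUtilsMessengerEXT>(
    vkGetInstanceProcAddr(instance, "vkCreateDebugUtilsMessengerEXT"));
if (createMessenger == nullptr ||
    createMessenger(instance, &createInfo, ALLOC, &debugMessenger) != VK_SUCCESS) {
    LOG_E("Failed to redirect validation errors");
}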
bool Renderer::Load(Window& window, const renderOptions& opt) {
Window::CreateInfo windowInfo;
@ -135,24 +191,34 @@ bool Renderer::Load(Window& window, const renderOptions& opt) {
std::vector<VkExtensionProperties> availableExtensions(availableExtensionCount);
vkEnumerateInstanceExtensionProperties(nullptr, &availableExtensionCount, availableExtensions.data());
#if DEBUG
LOG_D("Available extensions:");
#if LOG_TRACE
LOG_T("Available instance extensions:");
for (const auto &extension : availableExtensions) {
LOG_D('\t' << extension.extensionName << " : " << extension.specVersion);
LOG_T('\t' << extension.extensionName << " : " << extension.specVersion);
}
#endif
const auto hasExtension = [&availableExtensions](const char *extension) {
return std::any_of(availableExtensions.begin(), availableExtensions.end(), [&extension](const VkExtensionProperties &ex) { return strcmp(ex.extensionName, extension) == 0; });
};
uint32_t glfwExtensionCount = 0;
const char **glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount);
extensions.reserve(glfwExtensionCount);
for (uint32_t i = 0; i < glfwExtensionCount; i++) {
if (std::none_of(availableExtensions.begin(), availableExtensions.end(), [&](const VkExtensionProperties &ex) { return strcmp(ex.extensionName, glfwExtensions[i]) == 0; })) {
if (!hasExtension(glfwExtensions[i])) {
LOG_E("Missing required glfw extension " << glfwExtensions[i]);
return false;
}
extensions.push_back(glfwExtensions[i]);
}
if constexpr (VALIDATION_LAYER) {
if (hasExtension(VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) {
extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
} else {
LOG_W("Debug utils extension unavailable");
}
}
}
createInfo.enabledExtensionCount = extensions.size();
createInfo.ppEnabledExtensionNames = extensions.data();
@ -163,10 +229,10 @@ bool Renderer::Load(Window& window, const renderOptions& opt) {
std::vector<VkLayerProperties> availableLayers(availableLayerCount);
vkEnumerateInstanceLayerProperties(&availableLayerCount, availableLayers.data());
#if DEBUG
LOG_D("Available layers:");
#if LOG_TRACE
LOG_T("Available layers:");
for (const auto &layer : availableLayers) {
LOG_D('\t' << layer.layerName << " : " << layer.specVersion);
LOG_T('\t' << layer.layerName << " : " << layer.specVersion);
}
#endif
@ -181,11 +247,6 @@ bool Renderer::Load(Window& window, const renderOptions& opt) {
} else {
LOG_W("Validation layer unavailable");
}
if (hasLayer(VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) {
layers.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
} else {
LOG_W("Debug utils layer unavailable");
}
}
}
createInfo.enabledLayerCount = layers.size();
@ -213,7 +274,8 @@ bool Renderer::Load(Window& window, const renderOptions& opt) {
}
PhysicalDeviceInfo physicalInfo;
std::vector<const char *> requiredExtensions = {VK_KHR_SWAPCHAIN_EXTENSION_NAME};
const std::vector<const char *> requiredExtensions = {VK_KHR_SWAPCHAIN_EXTENSION_NAME};
const std::vector<const char *> optionalExtensions = {VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, VK_EXT_MEMORY_BUDGET_EXTENSION_NAME};
{
uint32_t deviceCount = 0;
vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr);
@ -232,8 +294,9 @@ bool Renderer::Load(Window& window, const renderOptions& opt) {
vkGetPhysicalDeviceProperties(device, &deviceProperties);
vkGetPhysicalDeviceFeatures(device, &deviceFeatures);
if (!deviceFeatures.geometryShader)
continue;
auto infos = PhysicalDeviceInfo(window.getPtr(), device, surface);
//FIXME: if (!deviceFeatures.geometryShader) continue;
{
uint32_t availableExtensionsCount;
@ -241,12 +304,24 @@ bool Renderer::Load(Window& window, const renderOptions& opt) {
std::vector<VkExtensionProperties> availableExtensions(availableExtensionsCount);
vkEnumerateDeviceExtensionProperties(device, nullptr, &availableExtensionsCount, availableExtensions.data());
if (std::any_of(requiredExtensions.begin(), requiredExtensions.end(), [&](const char *required) {
return std::none_of(availableExtensions.begin(), availableExtensions.end(), [&](const VkExtensionProperties &ex) {
return strcmp(ex.extensionName, required) == 0;
});
}))
#if LOG_TRACE
LOG_T("Available device extensions:");
for (const auto &extension : availableExtensions) {
LOG_T('\t' << extension.extensionName << " : " << extension.specVersion);
}
#endif
const auto hasExtension = [&availableExtensions](const char *extension) {
return std::any_of(availableExtensions.begin(), availableExtensions.end(), [&extension](const VkExtensionProperties &ex) { return strcmp(ex.extensionName, extension) == 0; });
};
if (std::any_of(requiredExtensions.begin(), requiredExtensions.end(), [&](const char *required) { return !hasExtension(required); }))
continue;
for (auto extension: optionalExtensions) {
if (hasExtension(extension))
infos.optionalExtensions.push_back(extension);
}
}
if (deviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU)
@ -255,7 +330,6 @@ bool Renderer::Load(Window& window, const renderOptions& opt) {
score += deviceProperties.limits.maxImageDimension2D;
//TODO: check others limits
auto infos = PhysicalDeviceInfo(window.getPtr(), device, surface);
if (!infos.queueIndices.isComplete())
continue;
if (infos.queueIndices.isOptimal())
@ -299,6 +373,8 @@ bool Renderer::Load(Window& window, const renderOptions& opt) {
VkPhysicalDeviceFeatures deviceFeatures{};
//TODO:
std::vector<const char*> extensions(requiredExtensions);
extensions.insert(extensions.end(), physicalInfo.optionalExtensions.begin(), physicalInfo.optionalExtensions.end());
VkDeviceCreateInfo createInfo{};
createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
createInfo.pQueueCreateInfos = queueCreateInfos.data();
@ -306,8 +382,8 @@ bool Renderer::Load(Window& window, const renderOptions& opt) {
createInfo.pEnabledFeatures = &deviceFeatures;
createInfo.enabledLayerCount = layers.size();
createInfo.ppEnabledLayerNames = layers.data();
createInfo.enabledExtensionCount = requiredExtensions.size();
createInfo.ppEnabledExtensionNames = requiredExtensions.data();
createInfo.enabledExtensionCount = extensions.size();
createInfo.ppEnabledExtensionNames = extensions.data();
if (vkCreateDevice(physicalInfo.device, &createInfo, ALLOC, &device) != VK_SUCCESS) {
LOG_E("Failed to bind graphic device");

View File

@ -42,6 +42,7 @@ private:
renderOptions options;
VkInstance instance;
VkDebugUtilsMessengerEXT debugMessenger;
VkSurfaceKHR surface;
VkDevice device;

View File

@ -90,7 +90,7 @@ void DistantUniverse::pullNetwork(voxel_pos pos) {
break;
dict.emplace(packet->data + sizeof(server_packet_type), packet->dataLength - sizeof(server_packet_type));
LOG_D("Compression dictionnary loaded");
LOG_T("Compression dictionnary loaded");
break;
}

View File

@ -49,7 +49,7 @@ public:
for(int i = 0; i < count && enet_host_service(host, &event, delay) > 0; i++) {
switch(event.type) {
case ENET_EVENT_TYPE_CONNECT:
LOG_D("Client reconnected");
LOG_T("Client reconnected");
break;
case ENET_EVENT_TYPE_DISCONNECT:
@ -60,30 +60,30 @@ public:
case ENET_EVENT_TYPE_RECEIVE: {
if(event.packet->dataLength < sizeof(server_packet_type)) {
LOG_D("Empty packet from server");
LOG_T("Empty packet from server");
break;
}
const server_packet_type type = static_cast<server_packet_type>(*event.packet->data);
if(type < server_packet_type::BROADCASTED) {
if(event.packet->dataLength < sizeof(server_packet_type) + sizeof(salt)) {
LOG_D("Wrong salted packet size");
LOG_T("Wrong salted packet size");
break;
}
if(memcmp(&salt, event.packet->data + sizeof(server_packet_type), sizeof(salt)) != 0) {
LOG_D("Wrong server salt");
LOG_T("Wrong server salt");
break;
}
}
if(type == server_packet_type::CHALLENGE) {
if(event.packet->dataLength != sizeof(server_packet_type) + 2 * sizeof(salt)) {
LOG_D("Wrong challenge packet size");
LOG_T("Wrong challenge packet size");
break;
}
salt_t l;
PacketReader(event.packet).read(l);
salt ^= l;
LOG_D("Handshake done");
LOG_T("Handshake done");
ready = true;
break;
}
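The checks above amount to a small salt handshake: every non-broadcast server packet must carry the session salt, and the CHALLENGE packet delivers the server's salt, which the client XORs into its own before marking the connection ready. A compact sketch of that client-side state, with illustrative names only:

// Sketch of the salt handshake (names are illustrative, not the project's API).
#include <cstdint>
using salt_t = uint64_t;

struct SaltedSession {
    salt_t salt;           // initially the client's own random salt
    bool ready = false;

    void onChallenge(salt_t serverSalt) {
        salt ^= serverSalt;    // shared session salt becomes client_salt ^ server_salt
        ready = true;
    }

    // Non-broadcast packets must echo the current salt to be accepted.
    bool accepts(salt_t packetSalt) const { return packetSalt == salt; }
};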

View File

@ -5,11 +5,20 @@
#include <iomanip>
#define _OUT(expr) {std::ostringstream oss; oss << expr << std::endl; std::cout << oss.str();}
#define LOG(expr) _OUT("[" << logger::now() << "] " << expr)
#define LOG(expr) _OUT("[" << BOLD << logger::now() << END_COLOR << "] " << BOLD << expr << END_COLOR)
#define LOG_E(expr) _OUT("[" << RED << logger::now() << END_COLOR << "] " << expr)
#define LOG_W(expr) _OUT("[" << YELLOW << logger::now() << END_COLOR << "] " << expr)
#define LOG_I(expr) _OUT("[" << GREEN << logger::now() << END_COLOR << "] " << expr)
#define LOG_D(expr) _OUT("[" << GREY << logger::now() << END_COLOR << "] " << expr)
#if LOG_DEBUG
#define LOG_D(expr) _OUT("[" << END_COLOR << logger::now() << END_COLOR << "] " << expr)
#else
#define LOG_D(expr)
#endif
#if LOG_TRACE
#define LOG_T(expr) _OUT("[" << GREY << logger::now() << END_COLOR << "] " << expr)
#else
#define LOG_T(expr)
#endif
#define FATAL(expr) LOG_E(expr); exit(EXIT_FAILURE)
namespace logger {
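LOG_E/LOG_W/LOG_I remain unconditional while LOG_D and LOG_T now vanish entirely unless the matching build option is set; _OUT also assembles the whole line in an ostringstream so each message reaches std::cout in a single insertion, which reduces interleaving when several threads log at once. A short usage sketch, assuming this header is included and the build defines LOG_TRACE=1:

#include "logger.hpp"   // path assumed; provides LOG_E/W/I/D/T and FATAL

int main() {
    LOG_I("Renderer ready");        // always compiled in
    LOG_D("Swapchain recreated");   // present only when LOG_DEBUG=1
    LOG_T("Trying OpenGL");         // present only when LOG_TRACE=1
    return 0;
}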

View File

@ -43,7 +43,7 @@ Universe::Universe(const Universe::options &options): host(options.connection, o
}
assert(tmp.size() == size && "Corrupted areas index");
far_areas = data::generational::vector<Area::params>(tmp);
LOG_D(far_areas.size() << " areas loaded");
LOG_T(far_areas.size() << " areas loaded");
} else {
LOG_E("No index file!!! Probably a new world...");
//TODO: generate universe
@ -333,7 +333,7 @@ void Universe::pullNetwork() {
host.pull(
[&](peer_t *peer, salt_t salt) {
ZoneScopedN("Connect");
LOG_D("Client connect from " << peer->address);
LOG_I("Client connect from " << peer->address);
net_client* client = new net_client(salt, entities.at(PLAYER_ENTITY_ID).instances.emplace(Entity::Instance{ }));
peer->data = client;
@ -347,14 +347,14 @@ void Universe::pullNetwork() {
},
[](peer_t *peer, disconnect_reason reason) {
ZoneScopedN("Disconnect");
LOG_D("Client disconnect from " << peer->address << " with " << (enet_uint32)reason);
LOG_I("Client disconnect from " << peer->address << " with " << (enet_uint32)reason);
if (const auto data = Server::GetPeerData<net_client>(peer); data != nullptr)
delete data;
},
[&](peer_t *peer, packet_t* packet, channel_type) {
ZoneScopedN("Data");
if(packet->dataLength < sizeof(client_packet_type) + sizeof(salt_t)) {
LOG_D("Empty packet from " << peer->address);
LOG_T("Empty packet from " << peer->address);
return;
}
if (memcmp(peer->data, packet->data + sizeof(client_packet_type), sizeof(salt_t)) != 0) {
@ -368,7 +368,7 @@ void Universe::pullNetwork() {
case client_packet_type::MOVE: {
if(voxel_pos pos; !PacketReader(packet).read(pos) ||
!movePlayer(Server::GetPeerData<net_client>(peer)->instanceId, pos)) {
LOG_D("Bad move");
LOG_T("Bad move");
}
break;
}
@ -377,7 +377,7 @@ void Universe::pullNetwork() {
//TODO: handle inventory
setCube(fill->pos, fill->val, fill->radius);
} else {
LOG_D("Bad fill");
LOG_T("Bad fill");
}
break;
}
@ -396,16 +396,16 @@ void Universe::pullNetwork() {
host.send(peer, serializeChunk({std::make_pair(id, cpos), std::dynamic_pointer_cast<Chunk>(chunk->second)}), net::channel_type::RELIABLE);
}
} else {
LOG_D("Request out of range chunk");
LOG_T("Request out of range chunk");
}
}
} else {
LOG_D("Bad chunk request");
LOG_T("Bad chunk request");
}
break;
}
default:
LOG_D("Bad packet from " << peer->address);
LOG_T("Bad packet from " << peer->address);
break;
}
});