diff --git a/src/renderer.c b/src/renderer.c
new file mode 100644
index 0000000..078408d
--- /dev/null
+++ b/src/renderer.c
@@ -0,0 +1,645 @@
+#include "renderer.h"
+#include "window.h"
+#include "buffer.h"
+#include "pipeline.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+static VkDebugUtilsMessengerEXT debug_messenger;
+
+static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
+ VkDebugUtilsMessageTypeFlagsEXT type, const VkDebugUtilsMessengerCallbackDataEXT *callback_data, void *data) {
+ (void) severity; (void) type; (void) data;
+ dbglogf("%s", callback_data->pMessage);
+ return VK_FALSE;
+}
+
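+/* Picks usable physical devices: a device qualifies if it advertises
+ * fragmentStoresAndAtomics, the VK_KHR_swapchain extension, a graphics
+ * queue family, a present-capable queue family, and at least one surface
+ * format and present mode. The first discrete GPU found is preferred;
+ * otherwise the first qualifying device wins. */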
+static bool enumerate_phygpus(struct vlkn_renderer *renderer) {
+ vkEnumeratePhysicalDevices(renderer->instance, &renderer->phy_gpus.cap, NULL);
+ if (renderer->phy_gpus.cap == 0)
+ return false;
+
+ renderer->phy_gpus.gpus = calloc(renderer->phy_gpus.cap, sizeof(*renderer->phy_gpus.gpus));
+ VkPhysicalDevice gpus[renderer->phy_gpus.cap];
+ vkEnumeratePhysicalDevices(renderer->instance, &renderer->phy_gpus.cap, gpus);
+
+ for (uint32_t i = 0; i < renderer->phy_gpus.cap; i++) {
+ uint32_t count;
+
+ vkEnumerateDeviceExtensionProperties(gpus[i], NULL, &count, NULL);
+ VkExtensionProperties ext_props[count];
+ vkEnumerateDeviceExtensionProperties(gpus[i], NULL, &count, ext_props);
+
+ VkPhysicalDeviceFeatures feats;
+ vkGetPhysicalDeviceFeatures(gpus[i], &feats);
+ if (!feats.fragmentStoresAndAtomics) {
+ dbglog("no atomic store");
+ continue;
+ }
+
+ bool swapchain = false;
+ for (size_t e = 0; e < count; e++)
+ if (strcmp(ext_props[e].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0) {
+ swapchain = true;
+ break;
+ }
+
+ if (!swapchain)
+ continue;
+
+ vkGetPhysicalDeviceQueueFamilyProperties(gpus[i], &count, NULL);
+ VkQueueFamilyProperties queue_props[count];
+ vkGetPhysicalDeviceQueueFamilyProperties(gpus[i], &count, queue_props);
+
+ ssize_t gfx_queue = -1, present_queue = -1;
+
+ for (size_t q = 0; q < count; q++) {
+ if ((queue_props[q].queueFlags & VK_QUEUE_GRAPHICS_BIT))
+ gfx_queue = q;
+
+ VkBool32 present_support = false;
+ vkGetPhysicalDeviceSurfaceSupportKHR(gpus[i], q, renderer->surface, &present_support);
+
+ if (present_support)
+ present_queue = q;
+ }
+
+ if (gfx_queue == -1 || present_queue == -1)
+ continue;
+
+ struct surface_caps caps;
+
+ vkGetPhysicalDeviceSurfaceFormatsKHR(gpus[i], renderer->surface, &caps.formats.len, NULL);
+ if (caps.formats.len == 0)
+ continue;
+ vkGetPhysicalDeviceSurfacePresentModesKHR(gpus[i], renderer->surface, &caps.present_modes.len, NULL);
+ if (caps.present_modes.len == 0)
+ continue;
+
+ vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpus[i], renderer->surface, &caps.caps);
+ vkGetPhysicalDeviceSurfaceFormatsKHR(gpus[i], renderer->surface,
+ &caps.formats.len, (caps.formats.data = calloc(caps.formats.len, sizeof(*caps.formats.data))));
+ vkGetPhysicalDeviceSurfacePresentModesKHR(gpus[i], renderer->surface,
+ &caps.present_modes.len, (caps.present_modes.data = calloc(caps.present_modes.len, sizeof(*caps.present_modes.data))));
+
+ renderer->phy_gpus.gpus[renderer->phy_gpus.len++] = (struct phy_gpu) {
+ .gpu = gpus[i],
+ .graphics_queue = gfx_queue,
+ .present_queue = present_queue,
+ .surface_caps = caps
+ };
+
+ if (!renderer->phy_gpus.chosen) {
+ VkPhysicalDeviceProperties props;
+ vkGetPhysicalDeviceProperties(gpus[i], &props);
+ if (props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU)
+ renderer->phy_gpus.chosen = &renderer->phy_gpus.gpus[renderer->phy_gpus.len - 1];
+ }
+ }
+
+ if (renderer->phy_gpus.len == 0)
+ return false;
+
+ if (!renderer->phy_gpus.chosen)
+ renderer->phy_gpus.chosen = &renderer->phy_gpus.gpus[0];
+
+ return true;
+}
+
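+/* Creates the logical device with a graphics and a present queue, enabling
+ * only the swapchain extension plus the two features the renderer relies
+ * on: fragment stores/atomics and runtime descriptor arrays (the latter
+ * via the chained Vulkan 1.2 features struct). */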
+static VkResult create_device(struct vlkn_renderer *ren) {
+ float queue_prio = 1.0f;
+ VkDeviceQueueCreateInfo queue_infos[] = {
+ {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+ .queueFamilyIndex = ren->phy_gpus.chosen->graphics_queue,
+ .queueCount = 1,
+ .pQueuePriorities = &queue_prio
+ },
+ {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+ .queueFamilyIndex = ren->phy_gpus.chosen->present_queue,
+ .queueCount = 1,
+ .pQueuePriorities = &queue_prio
+ }
+ };
+
+ const char * const ext = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+
+ VkPhysicalDeviceFeatures feats = { .fragmentStoresAndAtomics = VK_TRUE };
+ VkPhysicalDeviceVulkan12Features feats12 = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
+ .runtimeDescriptorArray = VK_TRUE
+ };
+
+ VkDeviceCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ .pNext = &feats12,
+ .pQueueCreateInfos = queue_infos,
+ /* a queue family may appear only once in pQueueCreateInfos, so pass a
+ * single entry when graphics and present share a family */
+ .queueCreateInfoCount = ren->phy_gpus.chosen->graphics_queue ==
+ ren->phy_gpus.chosen->present_queue ? 1 : len(queue_infos),
+ .enabledExtensionCount = 1,
+ .ppEnabledExtensionNames = &ext,
+ .pEnabledFeatures = &feats
+ };
+
+ VkResult res = vkCreateDevice(ren->phy_gpus.chosen->gpu, &create_info, NULL, &ren->gpu.device);
+ if (res != VK_SUCCESS)
+ return res;
+
+ vkGetDeviceQueue(ren->gpu.device, ren->phy_gpus.chosen->graphics_queue, 0, &ren->gpu.gfx_queue);
+ vkGetDeviceQueue(ren->gpu.device, ren->phy_gpus.chosen->present_queue, 0, &ren->gpu.present_queue);
+
+ return VK_SUCCESS;
+}
+
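+/* Prefers B8G8R8A8 sRGB with the sRGB-nonlinear color space, falling back
+ * to whatever format the surface reports first. */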
+static VkSurfaceFormatKHR pick_swapchain_format(const struct surface_caps *caps) {
+ assert(caps && caps->formats.data && caps->formats.len > 0);
+ for (size_t i = 0; i < caps->formats.len; i++) {
+ VkSurfaceFormatKHR format = caps->formats.data[i];
+ if (format.format == VK_FORMAT_B8G8R8A8_SRGB && format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR)
+ return format;
+ }
+ return caps->formats.data[0];
+}
+
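+/* Prefers FIFO_RELAXED (tears rather than stutters when a frame is late);
+ * FIFO is the fallback since it is the only mode the spec guarantees. */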
+static VkPresentModeKHR pick_present_mode(const struct surface_caps *caps) {
+ assert(caps && caps->present_modes.data);
+ for (size_t i = 0; i < caps->present_modes.len; i++) {
+ VkPresentModeKHR mode = caps->present_modes.data[i];
+ if (mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
+ return mode;
+ }
+ return VK_PRESENT_MODE_FIFO_KHR;
+}
+
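+/* Single-subpass render pass: one color attachment presented to the
+ * swapchain plus one transient D32 depth attachment. */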
+static VkResult create_renderpass(struct vlkn_renderer *ren) {
+ VkAttachmentDescription color_attach = {
+ .format = ren->swapchain.format,
+ .samples = VK_SAMPLE_COUNT_1_BIT,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
+ };
+
+ VkAttachmentReference color_attach_ref = {
+ .attachment = 0,
+ .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ };
+
+ VkAttachmentDescription depth_attach = {
+ .format = VK_FORMAT_D32_SFLOAT,
+ .samples = VK_SAMPLE_COUNT_1_BIT,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
+ .storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ .finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
+ };
+
+ VkAttachmentReference depth_attach_ref = {
+ .attachment = 1,
+ .layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
+ };
+
+ VkSubpassDescription subpasses[] = {
+ {
+ .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ .colorAttachmentCount = 1,
+ .pColorAttachments = &color_attach_ref,
+ .pDepthStencilAttachment = &depth_attach_ref
+ }
+ };
+
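+ /* Wait for the previous frame's color and depth writes before this
+ * frame's color output and late depth tests touch the attachments. */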
+ VkSubpassDependency dep = {
+ .srcSubpass = VK_SUBPASS_EXTERNAL,
+ .dstSubpass = 0,
+ .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
+ .srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
+ .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
+ .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
+ };
+
+ VkAttachmentDescription attachs[] = { color_attach, depth_attach };
+
+ VkRenderPassCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ .attachmentCount = len(attachs),
+ .pAttachments = attachs,
+ .subpassCount = len(subpasses),
+ .pSubpasses = subpasses,
+ .dependencyCount = 1,
+ .pDependencies = &dep
+ };
+
+ return vkCreateRenderPass(ren->gpu.device, &create_info, NULL, &ren->render_pass);
+}
+
+void swapchain_destroy(struct vlkn_renderer *ren);
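+/* (Re)creates the swapchain, the lazily created render pass, the depth
+ * image and one framebuffer per swapchain image. */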
+static VkResult create_swapchain(struct vlkn_renderer *ren) {
+ struct surface_caps *caps = &ren->phy_gpus.chosen->surface_caps;
+ VkSurfaceFormatKHR format = pick_swapchain_format(caps);
+ ren->swapchain.format = format.format;
+ ren->swapchain.extent = caps->caps.currentExtent.width != UINT32_MAX ? caps->caps.currentExtent :
+ (VkExtent2D) {
+ .width = ren->win->width,
+ .height = ren->win->height
+ };
+
+ uint32_t image_count = caps->caps.minImageCount + 1;
+ if (caps->caps.maxImageCount > 0 && image_count > caps->caps.maxImageCount)
+ image_count = caps->caps.maxImageCount;
+
+ /* minImageCount + 1 gives the driver headroom so acquire rarely blocks;
+ * currentTransform applies no extra rotation, OPAQUE ignores alpha during
+ * composition, and clipped lets the driver skip obscured pixels */
+ VkSwapchainCreateInfoKHR create_info = {
+ .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ .surface = ren->surface,
+ .minImageCount = image_count,
+ .imageFormat = ren->swapchain.format,
+ .imageColorSpace = format.colorSpace,
+ .imageExtent = ren->swapchain.extent,
+ .imageArrayLayers = 1,
+ .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+ .preTransform = caps->caps.currentTransform,
+ .compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
+ .presentMode = pick_present_mode(caps),
+ .clipped = VK_TRUE,
+ };
+
+ uint32_t queue_families[] = { ren->phy_gpus.chosen->present_queue, ren->phy_gpus.chosen->graphics_queue };
+ if (queue_families[0] != queue_families[1]) {
+ /* distinct families: share images instead of transferring ownership */
+ create_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
+ create_info.queueFamilyIndexCount = 2;
+ create_info.pQueueFamilyIndices = queue_families;
+ } else {
+ create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ }
+
+ VkResult res = vkCreateSwapchainKHR(ren->gpu.device, &create_info, NULL, &ren->swapchain.swapchain);
+ if (res != VK_SUCCESS)
+ return res;
+
+ if (ren->render_pass == VK_NULL_HANDLE) {
+ res = create_renderpass(ren);
+ if (res != VK_SUCCESS)
+ return res;
+ }
+
+ vkGetSwapchainImagesKHR(ren->gpu.device, ren->swapchain.swapchain, &ren->swapchain.images.len, NULL);
+ ren->swapchain.images.data = calloc(ren->swapchain.images.len, sizeof(*ren->swapchain.images.data));
+ VkImage images[ren->swapchain.images.len];
+ vkGetSwapchainImagesKHR(ren->gpu.device, ren->swapchain.swapchain, &ren->swapchain.images.len, images);
+
+ struct image_opts opts = {
+ .extent = ren->swapchain.extent,
+ .mip_level = 1,
+ .format = VK_FORMAT_D32_SFLOAT,
+ .tiling = VK_IMAGE_TILING_OPTIMAL,
+ .usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
+ .mem_props = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+ .aspect = VK_IMAGE_ASPECT_DEPTH_BIT
+ };
+
+ res = image_create(ren, opts, &ren->depth);
+ if (res != VK_SUCCESS)
+ return res;
+
+ for (size_t i = 0; i < ren->swapchain.images.len; i++) {
+ ren->swapchain.images.data[i].image = images[i];
+ res = image_view_create(ren, ren->swapchain.format,
+ VK_IMAGE_ASPECT_COLOR_BIT, images[i], &ren->swapchain.images.data[i].view);
+ if (res != VK_SUCCESS)
+ return res;
+
+ VkImageView attachs[] = { ren->swapchain.images.data[i].view, ren->depth.view };
+ VkFramebufferCreateInfo fb_info = {
+ .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ .renderPass = ren->render_pass,
+ .attachmentCount = len(attachs),
+ .pAttachments = attachs,
+ .width = ren->swapchain.extent.width,
+ .height = ren->swapchain.extent.height,
+ .layers = 1
+ };
+ res = vkCreateFramebuffer(ren->gpu.device, &fb_info, NULL, &ren->swapchain.images.data[i].framebuffer);
+ if (res != VK_SUCCESS)
+ return res;
+ }
+
+ return VK_SUCCESS;
+}
+
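+/* Per-frame sync: image_available signals acquire completion,
+ * render_finished gates presentation, and in_flight fences start
+ * signaled so the first vkWaitForFences does not block forever. */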
+static VkResult create_sync_objects(struct vlkn_renderer *ren) {
+ VkSemaphoreCreateInfo semaphore_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO
+ };
+ VkFenceCreateInfo fence_info = {
+ .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ .flags = VK_FENCE_CREATE_SIGNALED_BIT
+ };
+ VkResult res;
+#define X(exp) do { res = exp; if (res != VK_SUCCESS) return res; } while (0)
+ for (size_t i = 0; i < MAX_FRAMES; i++) {
+ X(vkCreateSemaphore(ren->gpu.device, &semaphore_info, NULL, &ren->locks.image_available[i]));
+ X(vkCreateSemaphore(ren->gpu.device, &semaphore_info, NULL, &ren->locks.render_finished[i]));
+ X(vkCreateFence(ren->gpu.device, &fence_info, NULL, &ren->locks.in_flight[i]));
+ }
+#undef X
+ return VK_SUCCESS;
+}
+
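+/* One resettable primary command buffer per in-flight frame, allocated
+ * from a pool on the graphics queue family. */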
+static VkResult create_command_pool(struct vlkn_renderer *ren) {
+ VkCommandPoolCreateInfo pool_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
+ .queueFamilyIndex = ren->phy_gpus.chosen->graphics_queue
+ };
+
+ VkResult res = vkCreateCommandPool(ren->gpu.device, &pool_info, NULL, &ren->command.pool);
+ if (res != VK_SUCCESS)
+ return res;
+
+ VkCommandBufferAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ .commandPool = ren->command.pool,
+ .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ .commandBufferCount = MAX_FRAMES
+ };
+
+ return vkAllocateCommandBuffers(ren->gpu.device, &alloc_info, ren->command.buffers);
+}
+
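+/* Window resize: wait for the GPU to go idle, then rebuild the swapchain
+ * at the new extent (skipped while the window is minimized). */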
+void resize_callback(void *data) {
+ struct vlkn_renderer *ren = data;
+
+ if (ren->win->width == 0 || ren->win->height == 0)
+ return;
+ vkDeviceWaitIdle(ren->gpu.device);
+ swapchain_destroy(ren);
+ create_swapchain(ren);
+}
+
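+/* Typical frame loop (a sketch; window creation, pipeline and mesh setup
+ * come from window.h, pipeline.h and buffer.h):
+ *
+ *   struct vlkn_renderer *ren = vlkn_renderer_init(win);
+ *   while (running) {
+ *       vlkn_render(ren);                    // acquire image, begin pass
+ *       vlkn_draw(ren, pipeline, n, meshes); // record draws
+ *       vlkn_present(ren);                   // submit and present
+ *   }
+ *   vlkn_renderer_destroy(ren);
+ */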
+struct vlkn_renderer *vlkn_renderer_init(struct vlkn_window *win) {
+ struct vlkn_renderer *ren = calloc(1, sizeof(*ren));
+ if (!ren)
+ return NULL;
+
+ ren->win = win;
+
+ VkApplicationInfo app_info = {
+ .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ .pApplicationName = win->title ? win->title : "",
+ .applicationVersion = VK_MAKE_VERSION(1, 0, 0),
+ .pEngineName = "void",
+ .engineVersion = VK_MAKE_VERSION(1, 0, 0),
+ .apiVersion = VK_API_VERSION_1_3
+ };
+
+ // TODO: query window
+ const char *exts[] = {
+ VK_KHR_SURFACE_EXTENSION_NAME,
+ VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
+ VK_EXT_DEBUG_UTILS_EXTENSION_NAME,
+ };
+
+ const char *validation_layers[] = { "VK_LAYER_KHRONOS_validation" };
+
+ VkInstanceCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ .pApplicationInfo = &app_info,
+ .enabledExtensionCount = len(exts),
+ .ppEnabledExtensionNames = exts,
+ .enabledLayerCount = 1,
+ .ppEnabledLayerNames = validation_layers
+ };
+
+ if (!vlkn_check(vkCreateInstance(&create_info, NULL, &ren->instance)))
+ goto err;
+
+ VkDebugUtilsMessengerCreateInfoEXT debug_create_info = {
+ .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
+ .messageSeverity = //VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT
+ | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
+ .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT
+ | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT
+ | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,
+ .pfnUserCallback = debug_callback
+ };
+ PFN_vkCreateDebugUtilsMessengerEXT create_messenger = (PFN_vkCreateDebugUtilsMessengerEXT)
+ vkGetInstanceProcAddr(ren->instance, "vkCreateDebugUtilsMessengerEXT");
+ if (create_messenger)
+ create_messenger(ren->instance, &debug_create_info, NULL, &debug_messenger);
+
+ ren->surface = window_create_vk_surface(ren->instance, win);
+ if (ren->surface == VK_NULL_HANDLE) {
+ dbglog("failed to create window surface");
+ goto err;
+ }
+
+ if (!enumerate_phygpus(ren)) {
+ dbglog("failed to enumerate physical gpus");
+ goto err;
+ }
+
+ if (!vlkn_check(create_device(ren)))
+ goto err;
+
+ if (!vlkn_check(create_swapchain(ren)))
+ goto err;
+
+ if (!vlkn_check(create_sync_objects(ren)))
+ goto err;
+
+ if (!vlkn_check(create_command_pool(ren)))
+ goto err;
+
+ window_on_resize(win, resize_callback, ren);
+ goto out;
+err:
+ vlkn_renderer_destroy(ren);
+ ren = NULL;
+out:
+ return ren;
+}
+
+void swapchain_destroy(struct vlkn_renderer *ren) {
+ image_destroy(ren, &ren->depth);
+
+ for (size_t i = 0; i < ren->swapchain.images.len; i++) {
+ vkDestroyFramebuffer(ren->gpu.device, ren->swapchain.images.data[i].framebuffer, NULL);
+ vkDestroyImageView(ren->gpu.device, ren->swapchain.images.data[i].view, NULL);
+ }
+ free(ren->swapchain.images.data);
+ vkDestroySwapchainKHR(ren->gpu.device, ren->swapchain.swapchain, NULL);
+}
+
+void vlkn_renderer_destroy(struct vlkn_renderer *renderer) {
+ if (!renderer)
+ return;
+ if (renderer->gpu.device) {
+ vkDeviceWaitIdle(renderer->gpu.device);
+ for (size_t i = 0; i < MAX_FRAMES; i++) {
+ vkDestroySemaphore(renderer->gpu.device, renderer->locks.image_available[i], NULL);
+ vkDestroySemaphore(renderer->gpu.device, renderer->locks.render_finished[i], NULL);
+ vkDestroyFence(renderer->gpu.device, renderer->locks.in_flight[i], NULL);
+ }
+
+ swapchain_destroy(renderer);
+
+ vkDestroyCommandPool(renderer->gpu.device, renderer->command.pool, NULL);
+
+ vkDestroyPipeline(renderer->gpu.device, renderer->pipeline.gfx, NULL);
+ vkDestroyPipeline(renderer->gpu.device, renderer->pipeline.blend, NULL);
+ vkDestroyPipelineLayout(renderer->gpu.device, renderer->pipeline.layout, NULL);
+
+ vkDestroyRenderPass(renderer->gpu.device, renderer->render_pass, NULL);
+
+ vkDestroyDevice(renderer->gpu.device, NULL);
+ }
+
+ if (renderer->instance) {
+ vkDestroySurfaceKHR(renderer->instance, renderer->surface, NULL);
+
+ PFN_vkDestroyDebugUtilsMessengerEXT destroy_messenger = (PFN_vkDestroyDebugUtilsMessengerEXT)
+ vkGetInstanceProcAddr(renderer->instance, "vkDestroyDebugUtilsMessengerEXT");
+ if (destroy_messenger)
+ destroy_messenger(renderer->instance, debug_messenger, NULL);
+ vkDestroyInstance(renderer->instance, NULL);
+ }
+
+ free(renderer);
+}
+
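+/* Index of the most recently acquired swapchain image; written by
+ * vlkn_render and consumed by vlkn_present. */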
+uint32_t idx;
+void vlkn_render(struct vlkn_renderer *renderer) {
+ size_t frame = 0;
+ vkWaitForFences(renderer->gpu.device, 1, &renderer->locks.in_flight[frame], VK_TRUE, UINT64_MAX);
+
+ VkResult res = vkAcquireNextImageKHR(renderer->gpu.device, renderer->swapchain.swapchain,
+ UINT64_MAX, renderer->locks.image_available[frame], VK_NULL_HANDLE, &idx);
+ switch (res) {
+ case VK_SUCCESS:
+ case VK_SUBOPTIMAL_KHR:
+ break;
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ resize_callback(renderer);
+ return;
+ default:
+ dbglog("failed to acquire swapchain images");
+ abort();
+ }
+
+ /* reset only after a successful acquire; returning early with the fence
+ * already reset would deadlock the wait at the top of the next frame */
+ vkResetFences(renderer->gpu.device, 1, &renderer->locks.in_flight[frame]);
+
+ VkCommandBuffer buffer = renderer->command.buffers[frame];
+ vkResetCommandBuffer(renderer->command.buffers[frame], 0);
+
+ VkCommandBufferBeginInfo begin_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ };
+
+ if (!vlkn_check(vkBeginCommandBuffer(buffer, &begin_info)))
+ return;
+
+ VkClearValue clear_color[] = {
+ { .color = {{ 0.0f, 0.0f, 0.0f, 1.0f }} },
+ { .depthStencil = { 1.0f, 0.0f }}
+ };
+ VkRenderPassBeginInfo render_pass_info = {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+ .renderPass = renderer->render_pass,
+ .framebuffer = renderer->swapchain.images.data[idx].framebuffer,
+ .renderArea = {
+ .extent = renderer->swapchain.extent,
+ .offset = {0, 0}
+ },
+ .clearValueCount = len(clear_color),
+ .pClearValues = clear_color
+ };
+
+ vkCmdBeginRenderPass(buffer, &render_pass_info, VK_SUBPASS_CONTENTS_INLINE);
+
+ VkViewport viewport = {
+ .x = 0.0f,
+ .y = 0.0f,
+ .width = renderer->swapchain.extent.width,
+ .height = renderer->swapchain.extent.height,
+ .minDepth = 0.0f,
+ .maxDepth = 1.0f,
+ };
+ vkCmdSetViewport(buffer, 0, 1, &viewport);
+
+ VkRect2D scissor = {
+ .offset = {0, 0},
+ .extent = renderer->swapchain.extent
+ };
+
+ vkCmdSetScissor(buffer, 0, 1, &scissor);
+}
+
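+/* Records draws into the current command buffer: binds the pipeline, then
+ * per mesh binds vertex/index buffers, pushes the mesh position and issues
+ * an indexed draw. Must run between vlkn_render and vlkn_present. */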
+void vlkn_draw(struct vlkn_renderer *renderer, struct vlkn_pipeline *pipeline,
+ size_t mesh_len, struct vlkn_mesh meshes[mesh_len]) {
+ size_t frame = 0;
+ VkCommandBuffer buffer = renderer->command.buffers[frame];
+
+ vkCmdBindPipeline(buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline->pipeline);
+
+ /*
+ vkCmdBindDescriptorSets(buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline->layout, 0, 1, &desc_sets[current_frame], 0, NULL);
+ */
+
+ for (size_t i = 0; i < mesh_len; i++) {
+ vkCmdBindVertexBuffers(buffer, 0, 1, &meshes[i].vertex.buffer, (VkDeviceSize[]){0});
+ vkCmdBindIndexBuffer(buffer, meshes[i].index.buffer, 0, VK_INDEX_TYPE_UINT32);
+ /* push constants must be recorded before the draw that reads them */
+ vkCmdPushConstants(buffer, pipeline->layout,
+ VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(struct vec3), &meshes[i].position);
+ vkCmdDrawIndexed(buffer, meshes[i].index_count, 1, 0, 0, 0);
+ }
+}
+
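+/* Ends the render pass, submits the command buffer (waiting on image
+ * acquisition at the color-output stage and signaling render_finished),
+ * then queues the acquired image for presentation. */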
+void vlkn_present(struct vlkn_renderer *renderer) {
+ size_t frame = 0;
+ VkCommandBuffer buffer = renderer->command.buffers[frame];
+
+ vkCmdEndRenderPass(buffer);
+ vlkn_check(vkEndCommandBuffer(buffer));
+
+ VkSubmitInfo submit = {
+ .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ .waitSemaphoreCount = 1,
+ .pWaitSemaphores = &renderer->locks.image_available[frame],
+ .pWaitDstStageMask = (VkPipelineStageFlags[]) { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT },
+ .signalSemaphoreCount = 1,
+ .pSignalSemaphores = &renderer->locks.render_finished[frame],
+ .commandBufferCount = 1,
+ .pCommandBuffers = &renderer->command.buffers[frame]
+ };
+
+ if (!vlkn_check(vkQueueSubmit(renderer->gpu.gfx_queue, 1, &submit, renderer->locks.in_flight[frame])))
+ return;
+
+ VkPresentInfoKHR present = {
+ .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ .waitSemaphoreCount = 1,
+ .pWaitSemaphores = &renderer->locks.render_finished[frame],
+ .swapchainCount = 1,
+ .pSwapchains = &renderer->swapchain.swapchain,
+ .pImageIndices = &idx
+ };
+
+ switch (vkQueuePresentKHR(renderer->gpu.present_queue, &present)) {
+ case VK_SUCCESS:
+ break;
+ case VK_SUBOPTIMAL_KHR:
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ resize_callback(renderer);
+ break;
+ default:
+ abort();
+ }
+
+ wl_display_roundtrip(renderer->win->dpy);
+ //ren->current_frame = (ren->current_frame + 1) % MAX_FRAMES;
+}