author	Anna (navi) Figueiredo Gomes <navi@vlhl.dev>	2024-02-07 22:49:00 +0100
committer	Anna (navi) Figueiredo Gomes <navi@vlhl.dev>	2024-02-07 22:49:00 +0100
commit	35a70d71f62e41d78d68247075ce174f2b6d997a (patch)
tree	c9af8d8c44256abfc100c396182fd27f1f4c7263 /src/render
initial commit -- THE CUBES SPIN
Signed-off-by: Anna (navi) Figueiredo Gomes <navi@vlhl.dev>
Diffstat (limited to 'src/render')
-rw-r--r--	src/render/buffer.c	117
-rw-r--r--	src/render/mesh.c	83
-rw-r--r--	src/render/renderer.c	889
-rw-r--r--	src/render/shader.c	31
-rw-r--r--	src/render/util.c	1
5 files changed, 1121 insertions, 0 deletions
diff --git a/src/render/buffer.c b/src/render/buffer.c
new file mode 100644
index 0000000..2765a24
--- /dev/null
+++ b/src/render/buffer.c
@@ -0,0 +1,117 @@
+#include "render/buffer.h"
+#include "render/renderer.h"
+#include <sys/types.h>
+
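+/*
+ * Scan the physical device's memory types for the first one that is both
+ * allowed by `filter` (the memoryTypeBits mask from VkMemoryRequirements)
+ * and carries every requested property flag; -1 means no match.
+ */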
+static ssize_t find_memory_type(struct renderer *ren, uint32_t filter, VkMemoryPropertyFlags props) {
+ VkPhysicalDeviceMemoryProperties mem_props;
+ vkGetPhysicalDeviceMemoryProperties(ren->phy_gpus.chosen->gpu, &mem_props);
+
+ for (size_t i = 0; i < mem_props.memoryTypeCount; i++)
+ if ((filter & (1u << i)) && (mem_props.memoryTypes[i].propertyFlags & props) == props)
+ return i;
+
+ return -1;
+}
+
+VkResult buffer_create(struct renderer *ren, VkDeviceSize size,
+ VkBufferUsageFlags usage, VkMemoryPropertyFlags props, struct buffer *buf) {
+ VkBufferCreateInfo buffer_info = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ .size = size,
+ .usage = usage,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ };
+
+ VkResult res = vkCreateBuffer(ren->gpu.device, &buffer_info, NULL, &buf->buffer);
+ if (res != VK_SUCCESS)
+ return res;
+
+ VkMemoryRequirements mem_reqs;
+ vkGetBufferMemoryRequirements(ren->gpu.device, buf->buffer, &mem_reqs);
+
+ ssize_t mem_type_index = find_memory_type(ren, mem_reqs.memoryTypeBits, props);
+ if (mem_type_index == -1) {
+ vkDestroyBuffer(ren->gpu.device, buf->buffer, NULL);
+ return VK_ERROR_UNKNOWN;
+ }
+
+ VkMemoryAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .allocationSize = mem_reqs.size,
+ .memoryTypeIndex = mem_type_index
+ };
+
+ res = vkAllocateMemory(ren->gpu.device, &alloc_info, NULL, &buf->memory);
+ if (res != VK_SUCCESS) {
+ vkDestroyBuffer(ren->gpu.device, buf->buffer, NULL);
+ return res;
+ }
+
+ return vkBindBufferMemory(ren->gpu.device, buf->buffer, buf->memory, 0);
+}
+
+void buffer_destroy(struct renderer *ren, struct buffer *buf) {
+ vkDestroyBuffer(ren->gpu.device, buf->buffer, NULL);
+ vkFreeMemory(ren->gpu.device, buf->memory, NULL);
+}
+
+
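+/*
+ * Image counterpart to buffer_create(): create a 2D image, allocate memory
+ * matching its requirements, and bind the two in one call.
+ */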
+VkResult create_image(struct renderer *ren, VkExtent2D extent, uint32_t mip_level, VkFormat format, VkImageTiling tiling,
+ VkImageUsageFlags usage, VkMemoryPropertyFlags props, VkImage *img, VkDeviceMemory *memory) {
+ VkImageCreateInfo image_info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ .imageType = VK_IMAGE_TYPE_2D,
+ .extent = {
+ .width = extent.width,
+ .height = extent.height,
+ .depth = 1
+ },
+ .mipLevels = mip_level,
+ .arrayLayers = 1,
+ .format = format,
+ .tiling = tiling,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ .usage = usage,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ .samples = VK_SAMPLE_COUNT_1_BIT
+ };
+
+ VkResult res = vkCreateImage(ren->gpu.device, &image_info, NULL, img);
+ if (res != VK_SUCCESS)
+ return res;
+
+ VkMemoryRequirements mem_reqs;
+ vkGetImageMemoryRequirements(ren->gpu.device, *img, &mem_reqs);
+
+ ssize_t mem_type_index = find_memory_type(ren, mem_reqs.memoryTypeBits, props);
+ if (mem_type_index == -1) {
+ vkDestroyImage(ren->gpu.device, *img, NULL);
+ return VK_ERROR_UNKNOWN;
+ }
+
+ VkMemoryAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .allocationSize = mem_reqs.size,
+ .memoryTypeIndex = mem_type_index
+ };
+
+ res = vkAllocateMemory(ren->gpu.device, &alloc_info, NULL, memory);
+ if (res != VK_SUCCESS) {
+ vkDestroyImage(ren->gpu.device, *img, NULL);
+ return res;
+ }
+
+ return vkBindImageMemory(ren->gpu.device, *img, *memory, 0);
+}
+
+VkResult create_image_view(VkDevice gpu, VkImage image, VkFormat format, VkImageAspectFlags flags, VkImageView *view) {
+ return vkCreateImageView(gpu, &(VkImageViewCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = image,
+ .viewType = VK_IMAGE_VIEW_TYPE_2D,
+ .format = format,
+ .components = {
+ .r = VK_COMPONENT_SWIZZLE_IDENTITY,
+ .g = VK_COMPONENT_SWIZZLE_IDENTITY,
+ .b = VK_COMPONENT_SWIZZLE_IDENTITY,
+ .a = VK_COMPONENT_SWIZZLE_IDENTITY,
+ },
+ .subresourceRange = {
+ .aspectMask = flags,
+ .baseMipLevel = 0,
+ .levelCount = 1,
+ .baseArrayLayer = 0,
+ .layerCount = 1
+ }
+ }, NULL, view);
+}
diff --git a/src/render/mesh.c b/src/render/mesh.c
new file mode 100644
index 0000000..d2229f1
--- /dev/null
+++ b/src/render/mesh.c
@@ -0,0 +1,83 @@
+#include "render/mesh.h"
+#include "render/buffer.h"
+#include "render/renderer.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
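+/*
+ * One-shot command helpers: record into a throwaway primary buffer with
+ * ONE_TIME_SUBMIT, then submit and block on vkQueueWaitIdle. Simple and
+ * fine for startup uploads, though a fence would allow overlapping work.
+ */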
+static VkCommandBuffer begin_single_command(struct renderer *ren) {
+ VkCommandBufferAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ .commandPool = ren->command.pool,
+ .commandBufferCount = 1
+ };
+
+ VkCommandBuffer buffer;
+ vkAllocateCommandBuffers(ren->gpu.device, &alloc_info, &buffer);
+
+ VkCommandBufferBeginInfo begin_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT
+ };
+
+ vkBeginCommandBuffer(buffer, &begin_info);
+
+ return buffer;
+}
+
+static void end_single_command(struct renderer *ren, VkCommandBuffer buffer) {
+ vkEndCommandBuffer(buffer);
+
+ VkSubmitInfo submit_info = {
+ .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ .commandBufferCount = 1,
+ .pCommandBuffers = &buffer
+ };
+
+ vkQueueSubmit(ren->gpu.gfx_queue, 1, &submit_info, VK_NULL_HANDLE);
+ vkQueueWaitIdle(ren->gpu.gfx_queue);
+
+ vkFreeCommandBuffers(ren->gpu.device, ren->command.pool, 1, &buffer);
+}
+
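+/*
+ * Staging upload: vertices and indices are memcpy'd into one host-visible
+ * temporary buffer, then copied on the GPU into the device-local vertex and
+ * index buffers via two vkCmdCopyBuffer regions.
+ *
+ * Sketch of intended use (cube_vertices/cube_indices are hypothetical and
+ * not part of this commit):
+ *
+ *   struct mesh cube = upload_mesh(ren, 8, cube_vertices, 36, cube_indices,
+ *       (struct vec3) { 0.0f, 0.0f, 0.0f });
+ *   renderer_draw(ren, 1, &cube);
+ *   mesh_destroy(ren, &cube);
+ */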
+struct mesh upload_mesh(struct renderer *ren, size_t vertex_count, struct vertex vertices[vertex_count],
+ size_t index_count, uint32_t indices[index_count], struct vec3 position) {
+ const VkDeviceSize vertex_buffer_size = sizeof(*vertices) * vertex_count;
+ const VkDeviceSize index_buffer_size = sizeof(*indices) * index_count;
+
+ struct mesh buf;
+ buffer_create(ren, vertex_buffer_size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &buf.vertex);
+ buffer_create(ren, index_buffer_size, VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &buf.index);
+
+ struct buffer tmp;
+ buffer_create(ren, vertex_buffer_size + index_buffer_size + sizeof(mat4x4), VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &tmp);
+
+ void *data;
+ vkMapMemory(ren->gpu.device, tmp.memory, 0, vertex_buffer_size + index_buffer_size + sizeof(mat4x4), 0, &data);
+ memcpy(data, vertices, vertex_buffer_size);
+ memcpy((char *)data + vertex_buffer_size, indices, index_buffer_size);
+ vkUnmapMemory(ren->gpu.device, tmp.memory);
+
+ VkCommandBuffer cmd = begin_single_command(ren);
+ vkCmdCopyBuffer(cmd, tmp.buffer, buf.vertex.buffer, 1, &(VkBufferCopy) { .size = vertex_buffer_size });
+ vkCmdCopyBuffer(cmd, tmp.buffer, buf.index.buffer, 1, &(VkBufferCopy) { .size = index_buffer_size, .srcOffset = vertex_buffer_size });
+ end_single_command(ren, cmd);
+
+ buffer_destroy(ren, &tmp);
+
+ buf.index_count = index_count;
+
+ mat4x4_translate(buf.position, position.x, position.y, position.z);
+
+ return buf;
+}
+
+void mesh_destroy(struct renderer *ren, struct mesh *mesh) {
+ vkDeviceWaitIdle(ren->gpu.device);
+ buffer_destroy(ren, &mesh->vertex);
+ buffer_destroy(ren, &mesh->index);
+}
diff --git a/src/render/renderer.c b/src/render/renderer.c
new file mode 100644
index 0000000..1c1b23c
--- /dev/null
+++ b/src/render/renderer.c
@@ -0,0 +1,889 @@
+#include "render/renderer.h"
+#include "render/util.h"
+#include "render/mesh.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+
+#ifndef VERTEX_SHADER
+#define VERTEX_SHADER ""
+#endif
+
+#ifndef FRAGMENT_SHADER
+#define FRAGMENT_SHADER ""
+#endif
+
+// TODO: add asserts
+
+#define len(X) (sizeof(X) / sizeof(*(X)))
+#define min(X, Y) ((X) < (Y) ? (X) : (Y))
+#define max(X, Y) ((X) > (Y) ? (X) : (Y))
+
+static VkDebugUtilsMessengerEXT debug_messenger;
+
+static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
+ VkDebugUtilsMessageTypeFlagsEXT type, const VkDebugUtilsMessengerCallbackDataEXT *callback_data, void *data) {
+ fprintf(stderr, "validation layer: %s\n", callback_data->pMessage);
+ return VK_FALSE;
+}
+
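+/*
+ * Keep every physical device that exposes VK_KHR_swapchain, a graphics queue,
+ * and presentation support for our surface, caching its surface capabilities.
+ * The first discrete GPU wins; otherwise fall back to the first suitable one.
+ */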
+static bool enumerate_phygpus(struct renderer *ren) {
+ vkEnumeratePhysicalDevices(ren->instance, &ren->phy_gpus.cap, NULL);
+ if (ren->phy_gpus.cap == 0)
+ return false;
+
+ ren->phy_gpus.gpus = calloc(ren->phy_gpus.cap, sizeof(*ren->phy_gpus.gpus));
+ VkPhysicalDevice gpus[ren->phy_gpus.cap];
+ vkEnumeratePhysicalDevices(ren->instance, &ren->phy_gpus.cap, gpus);
+
+ for (uint32_t i = 0; i < ren->phy_gpus.cap; i++) {
+ uint32_t count;
+
+ vkEnumerateDeviceExtensionProperties(gpus[i], NULL, &count, NULL);
+ VkExtensionProperties ext_props[count];
+ vkEnumerateDeviceExtensionProperties(gpus[i], NULL, &count, ext_props);
+
+ bool swapchain = false;
+ for (size_t e = 0; e < count; e++)
+ if (strcmp(ext_props[e].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0) {
+ swapchain = true;
+ break;
+ }
+
+ if (!swapchain)
+ continue;
+
+ vkGetPhysicalDeviceQueueFamilyProperties(gpus[i], &count, NULL);
+ VkQueueFamilyProperties queue_props[count];
+ vkGetPhysicalDeviceQueueFamilyProperties(gpus[i], &count, queue_props);
+
+ ssize_t gfx_queue = -1, present_queue = -1;
+
+ for (size_t q = 0; q < count; q++) {
+ if ((queue_props[q].queueFlags & VK_QUEUE_GRAPHICS_BIT)) {
+ gfx_queue = q;
+ }
+
+ VkBool32 present_support = false;
+ vkGetPhysicalDeviceSurfaceSupportKHR(gpus[i], q, ren->surface, &present_support);
+
+ if (present_support)
+ present_queue = q;
+ }
+
+ if (gfx_queue == -1 || present_queue == -1)
+ continue;
+
+ struct surface_caps caps;
+
+ vkGetPhysicalDeviceSurfaceFormatsKHR(gpus[i], ren->surface, &caps.formats.len, NULL);
+ if (caps.formats.len == 0)
+ continue;
+ vkGetPhysicalDeviceSurfacePresentModesKHR(gpus[i], ren->surface, &caps.present_modes.len, NULL);
+ if (caps.present_modes.len == 0)
+ continue;
+
+ vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpus[i], ren->surface, &caps.caps);
+ vkGetPhysicalDeviceSurfaceFormatsKHR(gpus[i], ren->surface,
+ &caps.formats.len, (caps.formats.data = calloc(caps.formats.len, sizeof(*caps.formats.data))));
+ vkGetPhysicalDeviceSurfacePresentModesKHR(gpus[i], ren->surface,
+ &caps.present_modes.len, (caps.present_modes.data = calloc(caps.present_modes.len, sizeof(*caps.present_modes.data))));
+
+ ren->phy_gpus.gpus[ren->phy_gpus.len++] = (struct phy_gpu) {
+ .gpu = gpus[i],
+ .graphics_queue = gfx_queue,
+ .present_queue = present_queue,
+ .surface_caps = caps
+ };
+
+ if (!ren->phy_gpus.chosen) {
+ VkPhysicalDeviceProperties props;
+ vkGetPhysicalDeviceProperties(gpus[i], &props);
+ if (props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU)
+ ren->phy_gpus.chosen = &ren->phy_gpus.gpus[ren->phy_gpus.len - 1];
+ }
+ }
+
+ if (ren->phy_gpus.len == 0)
+ return false;
+
+ if (!ren->phy_gpus.chosen)
+ ren->phy_gpus.chosen = &ren->phy_gpus.gpus[0];
+
+ return true;
+}
+
+static bool create_device(struct renderer *ren) {
+ float queue_prio = 1.0f;
+ VkDeviceQueueCreateInfo queue_infos[] = {
+ {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+ .queueFamilyIndex = ren->phy_gpus.chosen->graphics_queue,
+ .queueCount = 1,
+ .pQueuePriorities = &queue_prio
+ },
+ {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+ .queueFamilyIndex = ren->phy_gpus.chosen->present_queue,
+ .queueCount = 1,
+ .pQueuePriorities = &queue_prio
+ }
+ };
+
+ const char * const ext = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+
+ VkDeviceCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ .pQueueCreateInfos = queue_infos,
+ // a queue family may only appear once in pQueueCreateInfos; collapse to a
+ // single entry when graphics and present share a family
+ .queueCreateInfoCount = ren->phy_gpus.chosen->graphics_queue == ren->phy_gpus.chosen->present_queue ? 1 : len(queue_infos),
+ .enabledExtensionCount = 1,
+ .ppEnabledExtensionNames = &ext
+ };
+
+ if (vkCreateDevice(ren->phy_gpus.chosen->gpu, &create_info, NULL, &ren->gpu.device) != VK_SUCCESS)
+ return false;
+
+ vkGetDeviceQueue(ren->gpu.device, ren->phy_gpus.chosen->graphics_queue, 0, &ren->gpu.gfx_queue);
+ vkGetDeviceQueue(ren->gpu.device, ren->phy_gpus.chosen->present_queue, 0, &ren->gpu.present_queue);
+
+ return true;
+}
+
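+// Prefer 8-bit BGRA in the sRGB colorspace; otherwise settle for whatever
+// format the surface lists first.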
+static VkSurfaceFormatKHR pick_swapchain_format(const struct surface_caps *caps) {
+ assert(caps && caps->formats.data && caps->formats.len > 0);
+ for (size_t i = 0; i < caps->formats.len; i++) {
+ VkSurfaceFormatKHR format = caps->formats.data[i];
+ if (format.format == VK_FORMAT_B8G8R8A8_SRGB && format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR)
+ return format;
+ }
+ return caps->formats.data[0];
+}
+
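+// FIFO_RELAXED tears rather than stalling when a frame arrives late; plain
+// FIFO (vsync) is the only mode the spec guarantees, hence the fallback.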
+static VkPresentModeKHR pick_present_mode(const struct surface_caps *caps) {
+ assert(caps && caps->present_modes.data);
+ for (size_t i = 0; i < caps->present_modes.len; i++) {
+ VkPresentModeKHR mode = caps->present_modes.data[i];
+ if (mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
+ return mode;
+ }
+ return VK_PRESENT_MODE_FIFO_KHR;
+}
+
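+/*
+ * Single-subpass render pass: one color attachment that ends in PRESENT_SRC
+ * layout and one D32 depth attachment whose contents are discarded after the
+ * pass. The external dependency orders this frame's attachment writes after
+ * the previous frame's use of the same images.
+ */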
+static VkResult create_renderpass(struct renderer *ren) {
+ VkAttachmentDescription color_attach = {
+ .format = ren->swapchain.format,
+ .samples = VK_SAMPLE_COUNT_1_BIT,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
+ };
+
+ VkAttachmentReference color_attach_ref = {
+ .attachment = 0,
+ .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ };
+
+ VkAttachmentDescription depth_attach = {
+ .format = VK_FORMAT_D32_SFLOAT,
+ .samples = VK_SAMPLE_COUNT_1_BIT,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
+ .storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ .finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
+ };
+
+ VkAttachmentReference depth_attach_ref = {
+ .attachment = 1,
+ .layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
+ };
+
+ VkSubpassDescription subpass = {
+ .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ .colorAttachmentCount = 1,
+ .pColorAttachments = &color_attach_ref,
+ .pDepthStencilAttachment = &depth_attach_ref
+ };
+
+ VkSubpassDependency dep = {
+ .srcSubpass = VK_SUBPASS_EXTERNAL,
+ .dstSubpass = 0,
+ .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
+ .srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
+ .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
+ .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
+ };
+
+ VkAttachmentDescription attachs[] = { color_attach, depth_attach };
+
+ VkRenderPassCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ .attachmentCount = len(attachs),
+ .pAttachments = attachs,
+ .subpassCount = 1,
+ .pSubpasses = &subpass,
+ .dependencyCount = 1,
+ .pDependencies = &dep
+ };
+
+ return vkCreateRenderPass(ren->gpu.device, &create_info, NULL, &ren->render_pass);
+}
+
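+/*
+ * Build the swapchain plus everything sized to it: the shared depth image and
+ * one image view + framebuffer per swapchain image. Also called from
+ * renderer_draw() to rebuild after VK_ERROR_OUT_OF_DATE_KHR.
+ */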
+static bool create_swapchain(struct renderer *ren) {
+ struct surface_caps *caps = &ren->phy_gpus.chosen->surface_caps;
+ VkSurfaceFormatKHR format = pick_swapchain_format(caps);
+ ren->swapchain.format = format.format;
+ ren->swapchain.extent = caps->caps.currentExtent.width != UINT32_MAX ? caps->caps.currentExtent :
+ (VkExtent2D) {
+ .width = ren->win->width,
+ .height = ren->win->height
+ };
+
+ uint32_t image_count = caps->caps.minImageCount + 1;
+ if (caps->caps.maxImageCount > 0 && image_count > caps->caps.maxImageCount)
+ image_count = caps->caps.maxImageCount;
+
+ // preTransform keeps the surface's current transform, compositeAlpha opts
+ // out of blending with other windows, and clipped lets the driver skip
+ // pixels hidden by other surfaces.
+ VkSwapchainCreateInfoKHR create_info = {
+ .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ .surface = ren->surface,
+ .minImageCount = image_count,
+ .imageFormat = ren->swapchain.format,
+ .imageColorSpace = format.colorSpace,
+ .imageExtent = ren->swapchain.extent,
+ .imageArrayLayers = 1,
+ .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+ .preTransform = caps->caps.currentTransform,
+ .compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
+ .presentMode = pick_present_mode(caps),
+ .clipped = VK_TRUE,
+ };
+
+ uint32_t queue_families[] = { ren->phy_gpus.chosen->present_queue, ren->phy_gpus.chosen->graphics_queue };
+ if (queue_families[0] != queue_families[1]) {
+ // graphics and present live in different families, so images must be shared
+ create_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
+ create_info.queueFamilyIndexCount = 2;
+ create_info.pQueueFamilyIndices = queue_families;
+ } else {
+ create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ }
+
+ VkResult res = vkCreateSwapchainKHR(ren->gpu.device, &create_info, NULL, &ren->swapchain.swapchain);
+ if (res != VK_SUCCESS)
+ return false;
+
+ if (ren->render_pass == VK_NULL_HANDLE && create_renderpass(ren) != VK_SUCCESS)
+ return false;
+
+ vkGetSwapchainImagesKHR(ren->gpu.device, ren->swapchain.swapchain, &ren->swapchain.images.len, NULL);
+ ren->swapchain.images.data = calloc(ren->swapchain.images.len, sizeof(*ren->swapchain.images.data));
+ VkImage images[ren->swapchain.images.len];
+ vkGetSwapchainImagesKHR(ren->gpu.device, ren->swapchain.swapchain, &ren->swapchain.images.len, images);
+
+ if (create_image(ren, ren->swapchain.extent, 1, VK_FORMAT_D32_SFLOAT, VK_IMAGE_TILING_OPTIMAL,
+ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+ &ren->depth.image, &ren->depth.memory) != VK_SUCCESS)
+ return false;
+
+ if (create_image_view(ren->gpu.device, ren->depth.image,
+ VK_FORMAT_D32_SFLOAT, VK_IMAGE_ASPECT_DEPTH_BIT, &ren->depth.view) != VK_SUCCESS)
+ return false;
+
+ for (size_t i = 0; i < ren->swapchain.images.len; i++) {
+ ren->swapchain.images.data[i].image = images[i];
+ if (create_image_view(ren->gpu.device, images[i], ren->swapchain.format,
+ VK_IMAGE_ASPECT_COLOR_BIT, &ren->swapchain.images.data[i].view) != VK_SUCCESS)
+ return false;
+ VkImageView attachs[] = { ren->swapchain.images.data[i].view, ren->depth.view };
+ VkFramebufferCreateInfo fb_info = {
+ .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ .renderPass = ren->render_pass,
+ .attachmentCount = len(attachs),
+ .pAttachments = attachs,
+ .width = ren->swapchain.extent.width,
+ .height = ren->swapchain.extent.height,
+ .layers = 1
+ };
+ if (vkCreateFramebuffer(ren->gpu.device, &fb_info, NULL,
+ &ren->swapchain.images.data[i].framebuffer) != VK_SUCCESS)
+ return false;
+ }
+
+ return true;
+}
+
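+/*
+ * Fixed-function state for the one graphics pipeline: triangle lists,
+ * back-face culling, LESS depth test, no blending, viewport/scissor dynamic.
+ * A vertex-stage push constant carries the per-mesh model matrix alongside
+ * the per-frame UBO descriptor set.
+ */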
+// TODO: understand better the options for each stage
+static bool create_pipeline(struct renderer *ren, size_t shader_count,
+ struct shader shaders[shader_count], struct shader_input *input) {
+ VkPipelineShaderStageCreateInfo shader_stages[shader_count];
+ VkShaderModule modules[shader_count];
+ for (size_t i = 0; i < shader_count; i++) {
+ shader_create_module(ren, shaders[i].len, shaders[i].code, &modules[i]);
+ shader_stages[i] = (VkPipelineShaderStageCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .stage = shaders[i].stage,
+ .module = modules[i],
+ .pName = "main"
+ };
+ }
+
+ VkPipelineVertexInputStateCreateInfo vertex_input_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ .vertexBindingDescriptionCount = input->desc_count,
+ .pVertexBindingDescriptions = input->descs,
+ .vertexAttributeDescriptionCount = input->attr_count,
+ .pVertexAttributeDescriptions = input->attrs
+ };
+
+ VkPipelineInputAssemblyStateCreateInfo input_assembly = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ .primitiveRestartEnable = VK_FALSE
+ };
+
+ VkPipelineDynamicStateCreateInfo dynamic_state_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ .dynamicStateCount = 2,
+ .pDynamicStates = (VkDynamicState[]){
+ VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR
+ }
+ };
+
+ // With dynamic viewport/scissor state the counts must still be declared
+ // here; only the actual values are deferred to command-buffer recording.
+ VkPipelineViewportStateCreateInfo viewport_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ .viewportCount = 1,
+ .scissorCount = 1
+ };
+
+ VkPipelineRasterizationStateCreateInfo rasterizer = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+ .depthClampEnable = VK_FALSE,
+ .rasterizerDiscardEnable = VK_FALSE,
+ .polygonMode = VK_POLYGON_MODE_FILL,
+ .lineWidth = 1.0f,
+ .cullMode = VK_CULL_MODE_BACK_BIT,
+ .frontFace = VK_FRONT_FACE_CLOCKWISE,
+ .depthBiasEnable = VK_FALSE,
+ };
+
+ VkPipelineMultisampleStateCreateInfo multisampling = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ .sampleShadingEnable = VK_FALSE,
+ .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT,
+ .minSampleShading = 1.0f,
+ };
+
+ VkPipelineColorBlendAttachmentState color_state = {
+ .colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
+ .blendEnable = VK_FALSE
+ };
+
+ VkPipelineColorBlendStateCreateInfo color_blending = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ .logicOpEnable = VK_FALSE,
+ .attachmentCount = 1,
+ .pAttachments = &color_state
+ };
+
+ VkPipelineDepthStencilStateCreateInfo depth_stencil = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+ .depthTestEnable = VK_TRUE,
+ .depthWriteEnable = VK_TRUE,
+ .depthCompareOp = VK_COMPARE_OP_LESS,
+ .depthBoundsTestEnable = VK_FALSE,
+ .minDepthBounds = 0.0f,
+ .maxDepthBounds = 1.0f,
+ .stencilTestEnable = VK_FALSE,
+ };
+
+ VkDescriptorSetLayout layouts[] = {
+ ren->descriptor.layout,
+ };
+
+ VkPushConstantRange range = {
+ .stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
+ .size = sizeof(mat4x4)
+ };
+
+ VkPipelineLayoutCreateInfo pipeline_layout_create_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ .setLayoutCount = len(layouts),
+ .pSetLayouts = layouts,
+ .pushConstantRangeCount = 1,
+ .pPushConstantRanges = &range
+ };
+
+ if (vkCreatePipelineLayout(ren->gpu.device, &pipeline_layout_create_info, NULL, &ren->pipeline.layout) != VK_SUCCESS)
+ return false;
+
+ VkGraphicsPipelineCreateInfo pipeline_info = {
+ .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ .stageCount = shader_count,
+ .pStages = shader_stages,
+ .pVertexInputState = &vertex_input_info,
+ .pInputAssemblyState = &input_assembly,
+ .pViewportState = &viewport_state,
+ .pRasterizationState = &rasterizer,
+ .pMultisampleState = &multisampling,
+ .pColorBlendState = &color_blending,
+ .pDynamicState = &dynamic_state_info,
+ .pDepthStencilState = &depth_stencil,
+ .layout = ren->pipeline.layout,
+ .renderPass = ren->render_pass,
+ .subpass = 0,
+ .basePipelineHandle = VK_NULL_HANDLE,
+ .basePipelineIndex = -1
+ };
+
+ if (vkCreateGraphicsPipelines(ren->gpu.device, VK_NULL_HANDLE, 1, &pipeline_info, NULL, &ren->pipeline.gfx) != VK_SUCCESS)
+ return false;
+
+ for (size_t i = 0; i < shader_count; i++)
+ vkDestroyShaderModule(ren->gpu.device, modules[i], NULL);
+
+ return true;
+}
+
+static VkResult create_command_pool(struct renderer *ren) {
+ VkCommandPoolCreateInfo pool_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
+ .queueFamilyIndex = ren->phy_gpus.chosen->graphics_queue
+ };
+
+ VkResult res = vkCreateCommandPool(ren->gpu.device, &pool_info, NULL, &ren->command.pool);
+ if (res != VK_SUCCESS)
+ return res;
+
+ VkCommandBufferAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ .commandPool = ren->command.pool,
+ .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ .commandBufferCount = MAX_FRAMES
+ };
+
+ return vkAllocateCommandBuffers(ren->gpu.device, &alloc_info, ren->command.buffers);
+}
+
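+/*
+ * Per-frame synchronization: image_available gates rendering on the acquired
+ * image, render_finished gates presentation on rendering, and the in_flight
+ * fences (created signaled) keep the CPU at most MAX_FRAMES frames ahead.
+ */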
+static bool create_sync_objects(struct renderer *ren) {
+ VkSemaphoreCreateInfo semaphore_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO
+ };
+ VkFenceCreateInfo fence_info = {
+ .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ .flags = VK_FENCE_CREATE_SIGNALED_BIT
+ };
+ for (size_t i = 0; i < MAX_FRAMES; i++)
+ if (vkCreateSemaphore(ren->gpu.device, &semaphore_info, NULL, &ren->locks.image_available[i]) != VK_SUCCESS
+ || vkCreateSemaphore(ren->gpu.device, &semaphore_info, NULL, &ren->locks.render_finished[i]) != VK_SUCCESS
+ || vkCreateFence(ren->gpu.device, &fence_info, NULL, &ren->locks.in_flight[i]) != VK_SUCCESS)
+ return false;
+ return true;
+}
+
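+/*
+ * One uniform-buffer descriptor set per in-flight frame, all sharing a single
+ * set layout (binding 0: the UBO read by the vertex shader).
+ */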
+static bool create_descriptor_sets(struct renderer *ren) {
+ VkDescriptorPoolCreateInfo pool_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+ .maxSets = MAX_FRAMES + 15,
+ .poolSizeCount = 2,
+ .pPoolSizes = (VkDescriptorPoolSize[]) {
+ {
+ .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ .descriptorCount = MAX_FRAMES
+ },
+ {
+ .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ .descriptorCount = 15
+ }
+ }
+ };
+
+ if (vkCreateDescriptorPool(ren->gpu.device, &pool_info, NULL, &ren->descriptor.pool) != VK_SUCCESS)
+ return false;
+
+ VkDescriptorSetLayoutBinding layout_bind[] = {
+ {
+ .binding = 0,
+ .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_VERTEX_BIT
+ }
+ };
+
+ VkDescriptorSetLayoutCreateInfo desc_create_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ .bindingCount = len(layout_bind),
+ .pBindings = layout_bind
+ };
+
+ if (vkCreateDescriptorSetLayout(ren->gpu.device, &desc_create_info, NULL, &ren->descriptor.layout) != VK_SUCCESS)
+ return false;
+
+ VkDescriptorSetLayout layouts[] = {
+ ren->descriptor.layout,
+ ren->descriptor.layout
+ };
+
+ VkDescriptorSetAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ .descriptorPool = ren->descriptor.pool,
+ .descriptorSetCount = MAX_FRAMES,
+ .pSetLayouts = layouts
+ };
+
+ if (vkAllocateDescriptorSets(ren->gpu.device, &alloc_info, ren->descriptor.sets) != VK_SUCCESS)
+ return false;
+
+ for (size_t i = 0; i < MAX_FRAMES; i++) {
+ VkDescriptorBufferInfo buffer_info = {
+ .buffer = ren->uniform[i].buffer.buffer,
+ .offset = 0,
+ .range = sizeof(struct ubo)
+ };
+ VkWriteDescriptorSet desc_write[] = {
+ {
+ .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ .dstSet = ren->descriptor.sets[i],
+ .dstBinding = 0,
+ .dstArrayElement = 0,
+ .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ .descriptorCount = 1,
+ .pBufferInfo = &buffer_info,
+ },
+ };
+
+ vkUpdateDescriptorSets(ren->gpu.device, len(desc_write), desc_write, 0, NULL);
+ }
+
+ return true;
+}
+
+struct renderer *renderer_init(struct window *win) {
+ struct renderer *ren = calloc(1, sizeof(*ren));
+ ren->win = win;
+ VkApplicationInfo app_info = {
+ .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ .pApplicationName = win->title,
+ .applicationVersion = VK_MAKE_VERSION(1, 0, 0),
+ .pEngineName = "void",
+ .engineVersion = VK_MAKE_VERSION(1, 0, 0),
+ .apiVersion = VK_API_VERSION_1_0
+ };
+
+ // TODO: query window
+ const char *exts[] = {
+ VK_KHR_SURFACE_EXTENSION_NAME,
+ VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
+ VK_EXT_DEBUG_UTILS_EXTENSION_NAME
+ };
+
+ const char *validation_layers[] = { "VK_LAYER_KHRONOS_validation" };
+
+ VkInstanceCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ .pApplicationInfo = &app_info,
+ .enabledExtensionCount = len(exts),
+ .ppEnabledExtensionNames = exts,
+ .enabledLayerCount = 1,
+ .ppEnabledLayerNames = validation_layers
+ };
+
+ if (vkCreateInstance(&create_info, NULL, &ren->instance) != VK_SUCCESS)
+ goto err;
+
+ VkDebugUtilsMessengerCreateInfoEXT debug_create_info = {
+ .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
+ .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT
+ | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT
+ | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
+ .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT
+ | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT
+ | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,
+ .pfnUserCallback = debug_callback
+ };
+
+ PFN_vkCreateDebugUtilsMessengerEXT create_messenger = (PFN_vkCreateDebugUtilsMessengerEXT)
+ vkGetInstanceProcAddr(ren->instance, "vkCreateDebugUtilsMessengerEXT");
+ if (create_messenger)
+ create_messenger(ren->instance, &debug_create_info, NULL, &debug_messenger);
+
+ ren->surface = window_create_vk_surface(ren->instance, win);
+ if (ren->surface == VK_NULL_HANDLE)
+ goto err;
+
+ if (!enumerate_phygpus(ren))
+ goto err;
+
+ if (!create_device(ren))
+ goto err;
+
+ if (!create_swapchain(ren))
+ goto err;
+
+ if (create_command_pool(ren) != VK_SUCCESS)
+ goto err;
+
+ if (!create_sync_objects(ren))
+ goto err;
+
+ struct shader shaders[2];
+ if (!shader_load(VERTEX_SHADER, &shaders[0]))
+ goto err;
+
+ shaders[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
+
+ if (!shader_load(FRAGMENT_SHADER, &shaders[1]))
+ goto err;
+
+ shaders[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+
+ struct shader_input input = {
+ .desc_count = 1,
+ .descs = &(VkVertexInputBindingDescription) {
+ .binding = 0,
+ .stride = sizeof(struct vertex),
+ .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
+ },
+ .attr_count = 3,
+ .attrs = (VkVertexInputAttributeDescription[3]) {
+ {
+ .binding = 0,
+ .location = 0,
+ .format = VK_FORMAT_R32G32B32_SFLOAT,
+ .offset = offsetof(struct vertex, position)
+ },
+ {
+ .binding = 0,
+ .location = 1,
+ .format = VK_FORMAT_R32G32B32_SFLOAT,
+ .offset = offsetof(struct vertex, normal)
+ },
+ {
+ .binding = 0,
+ .location = 2,
+ .format = VK_FORMAT_R32G32B32A32_SFLOAT,
+ .offset = offsetof(struct vertex, color)
+ }
+ }
+ };
+
+ for (size_t i = 0; i < MAX_FRAMES; i++) {
+ buffer_create(ren, sizeof(struct ubo), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &ren->uniform[i].buffer);
+ vkMapMemory(ren->gpu.device, ren->uniform[i].buffer.memory, 0, sizeof(struct ubo), 0, &ren->uniform[i].data);
+ }
+
+ if (!create_descriptor_sets(ren))
+ goto err;
+
+ if (!create_pipeline(ren, len(shaders), shaders, &input))
+ goto err;
+
+ // the SPIR-V blobs are only needed while building the shader modules
+ free(shaders[0].code);
+ free(shaders[1].code);
+
+ goto out;
+err:
+ renderer_destroy(ren);
+ ren = NULL;
+out:
+ return ren;
+}
+
+void image_destroy(struct renderer *ren, struct image *img) {
+ vkDestroyImageView(ren->gpu.device, img->view, NULL);
+ vkDestroyImage(ren->gpu.device, img->image, NULL);
+ vkFreeMemory(ren->gpu.device, img->memory, NULL);
+}
+
+void swapchain_destroy(struct renderer *ren) {
+ image_destroy(ren, &ren->depth);
+
+ for (size_t i = 0; i < ren->swapchain.images.len; i++) {
+ vkDestroyFramebuffer(ren->gpu.device, ren->swapchain.images.data[i].framebuffer, NULL);
+ vkDestroyImageView(ren->gpu.device, ren->swapchain.images.data[i].view, NULL);
+ }
+ free(ren->swapchain.images.data);
+ vkDestroySwapchainKHR(ren->gpu.device, ren->swapchain.swapchain, NULL);
+}
+
+void renderer_destroy(struct renderer *ren) {
+ if (!ren)
+ return;
+ if (ren->gpu.device) {
+ vkDeviceWaitIdle(ren->gpu.device);
+ for (size_t i = 0; i < MAX_FRAMES; i++) {
+ vkDestroySemaphore(ren->gpu.device, ren->locks.image_available[i], NULL);
+ vkDestroySemaphore(ren->gpu.device, ren->locks.render_finished[i], NULL);
+ vkDestroyFence(ren->gpu.device, ren->locks.in_flight[i], NULL);
+ buffer_destroy(ren, &ren->uniform[i].buffer);
+ }
+
+ swapchain_destroy(ren);
+
+
+ vkDestroySampler(ren->gpu.device, ren->sampler, NULL);
+
+ vkDestroyDescriptorPool(ren->gpu.device, ren->descriptor.pool, NULL);
+ vkDestroyDescriptorSetLayout(ren->gpu.device, ren->descriptor.layout, NULL);
+
+ vkDestroyCommandPool(ren->gpu.device, ren->command.pool, NULL);
+
+ vkDestroyPipeline(ren->gpu.device, ren->pipeline.gfx, NULL);
+ vkDestroyPipelineLayout(ren->gpu.device, ren->pipeline.layout, NULL);
+
+ vkDestroyRenderPass(ren->gpu.device, ren->render_pass, NULL);
+
+ vkDestroyDevice(ren->gpu.device, NULL);
+ }
+
+ if (ren->instance) {
+ vkDestroySurfaceKHR(ren->instance, ren->surface, NULL);
+
+ PFN_vkDestroyDebugUtilsMessengerEXT destroy_messenger = (PFN_vkDestroyDebugUtilsMessengerEXT)
+ vkGetInstanceProcAddr(ren->instance, "vkDestroyDebugUtilsMessengerEXT");
+ if (destroy_messenger)
+ destroy_messenger(ren->instance, debug_messenger, NULL);
+ vkDestroyInstance(ren->instance, NULL);
+ }
+
+ free(ren);
+}
+
+/* FIXME: hacks */
+static void update_ubo(struct renderer *ren, uint32_t frame) {
+ struct ubo ubo = { 0 };
+ static double rotate = 0.1;
+ mat4x4 id;
+ mat4x4_identity(id);
+ mat4x4_rotate(ubo.model, id, 0, 0, 1.0f, rotate * 90);
+ mat4x4_look_at(ubo.view, (vec3) {2, 2, 2}, (vec3) {0, 0, 0}, (vec3) {0, 0, 1});
+ // linmath's mat4x4_perspective expects the vertical FOV in radians
+ mat4x4_perspective(ubo.proj, 45.0f * 3.14159265f / 180.0f, ren->swapchain.extent.width / (float) ren->swapchain.extent.height, 0.1f, 10.0f);
+ ubo.proj[1][1] *= -1;
+
+ memcpy(ren->uniform[frame].data, &ubo, sizeof(ubo));
+ rotate += 0.0001;
+}
+
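+/*
+ * Frame loop: wait on this frame's fence, acquire a swapchain image, record
+ * the command buffer from scratch, submit (signalling render_finished and
+ * the fence), then present. An out-of-date swapchain at acquire or present
+ * time is rebuilt and the frame is skipped.
+ */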
+void renderer_draw(struct renderer *ren, size_t mesh_count, struct mesh meshes[mesh_count]) {
+ uint32_t frame = ren->current_frame;
+ vkWaitForFences(ren->gpu.device, 1, &ren->locks.in_flight[frame], VK_TRUE, UINT64_MAX);
+
+ uint32_t image_index;
+ switch (vkAcquireNextImageKHR(ren->gpu.device, ren->swapchain.swapchain, UINT64_MAX,
+ ren->locks.image_available[frame], VK_NULL_HANDLE, &image_index)) {
+ case VK_SUCCESS:
+ case VK_SUBOPTIMAL_KHR:
+ break;
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ vkDeviceWaitIdle(ren->gpu.device);
+ swapchain_destroy(ren);
+ create_swapchain(ren);
+ /* fall through: skip this frame after rebuilding the swapchain */
+ default:
+ return;
+ }
+
+ vkResetFences(ren->gpu.device, 1, &ren->locks.in_flight[frame]);
+ vkResetCommandBuffer(ren->command.buffers[frame], 0);
+
+ // record cmd buffer
+
+ if (vkBeginCommandBuffer(ren->command.buffers[frame], &(VkCommandBufferBeginInfo)
+ { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO }) != VK_SUCCESS)
+ return;
+
+
+ VkClearValue clear_color[] = {
+ { .color = {{ 0.0f, 0.0f, 0.0f, 1.0f }}},
+ { .depthStencil = { 1.0f, 0 }}
+ };
+ VkRenderPassBeginInfo render_pass_info = {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+ .renderPass = ren->render_pass,
+ .framebuffer = ren->swapchain.images.data[image_index].framebuffer,
+ .renderArea = {
+ .extent = ren->swapchain.extent,
+ .offset = {0, 0}
+ },
+ .clearValueCount = len(clear_color),
+ .pClearValues = clear_color
+ };
+
+ vkCmdBeginRenderPass(ren->command.buffers[frame], &render_pass_info, VK_SUBPASS_CONTENTS_INLINE);
+ vkCmdBindPipeline(ren->command.buffers[frame], VK_PIPELINE_BIND_POINT_GRAPHICS, ren->pipeline.gfx);
+
+ VkViewport viewport = {
+ .x = 0.0f,
+ .y = 0.0f,
+ .width = ren->swapchain.extent.width,
+ .height = ren->swapchain.extent.height,
+ .minDepth = 0.0f,
+ .maxDepth = 1.0f,
+ };
+ vkCmdSetViewport(ren->command.buffers[frame], 0, 1, &viewport);
+
+ VkRect2D scissor = {
+ .offset = {0, 0},
+ .extent = ren->swapchain.extent
+ };
+
+ vkCmdSetScissor(ren->command.buffers[frame], 0, 1, &scissor);
+
+ /* FIXME: hacks */
+ update_ubo(ren, frame);
+
+ vkCmdBindDescriptorSets(ren->command.buffers[frame], VK_PIPELINE_BIND_POINT_GRAPHICS,
+ ren->pipeline.layout, 0, 1, &ren->descriptor.sets[frame], 0, NULL);
+
+ for (size_t i = 0; i < mesh_count; i++) {
+ vkCmdBindVertexBuffers(ren->command.buffers[frame], 0, 1, &meshes[i].vertex.buffer, (VkDeviceSize[]){ 0 });
+ vkCmdBindIndexBuffer(ren->command.buffers[frame], meshes[i].index.buffer, 0, VK_INDEX_TYPE_UINT32);
+ vkCmdPushConstants(ren->command.buffers[frame], ren->pipeline.layout,
+ VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(mat4x4), meshes[i].position);
+
+ vkCmdDrawIndexed(ren->command.buffers[frame], meshes[i].index_count, 1, 0, 0, 0);
+ }
+
+ vkCmdEndRenderPass(ren->command.buffers[frame]);
+
+ if (vkEndCommandBuffer(ren->command.buffers[frame]) != VK_SUCCESS)
+ return;
+
+ VkSubmitInfo submit = {
+ .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ .waitSemaphoreCount = 1,
+ .pWaitSemaphores = &ren->locks.image_available[frame],
+ .pWaitDstStageMask = (VkPipelineStageFlags[]) { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT },
+ .signalSemaphoreCount = 1,
+ .pSignalSemaphores = &ren->locks.render_finished[frame],
+ .commandBufferCount = 1,
+ .pCommandBuffers = &ren->command.buffers[frame]
+ };
+
+ if (vkQueueSubmit(ren->gpu.gfx_queue, 1, &submit, ren->locks.in_flight[frame]) != VK_SUCCESS)
+ return;
+
+ VkPresentInfoKHR present = {
+ .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ .waitSemaphoreCount = 1,
+ .pWaitSemaphores = &ren->locks.render_finished[frame],
+ .swapchainCount = 1,
+ .pSwapchains = &ren->swapchain.swapchain,
+ .pImageIndices = &image_index
+ };
+
+ switch (vkQueuePresentKHR(ren->gpu.present_queue, &present)) {
+ case VK_SUCCESS:
+ break;
+ case VK_SUBOPTIMAL_KHR:
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ vkDeviceWaitIdle(ren->gpu.device);
+ swapchain_destroy(ren);
+ create_swapchain(ren);
+ /* fall through: skip advancing the frame counter this frame */
+ default:
+ return;
+ }
+
+ ren->current_frame = (ren->current_frame + 1) % MAX_FRAMES;
+}
diff --git a/src/render/shader.c b/src/render/shader.c
new file mode 100644
index 0000000..c16e2db
--- /dev/null
+++ b/src/render/shader.c
@@ -0,0 +1,31 @@
+#include "render/shader.h"
+#include "render/renderer.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
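+// codeSize is given in bytes, but pCode wants 32-bit SPIR-V words, so the
+// blob's length must be a multiple of 4 (calloc's allocation is aligned).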
+VkResult shader_create_module(struct renderer *ren, const size_t len, uint8_t code[len], VkShaderModule *module) {
+ return vkCreateShaderModule(ren->gpu.device, &(VkShaderModuleCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+ .codeSize = len,
+ .pCode = (uint32_t *)code
+ }, NULL, module);
+}
+
+bool shader_load(const char *filename, struct shader *shader) {
+ assert(filename);
+ assert(shader);
+ FILE *fp = fopen(filename, "rb");
+ if (!fp)
+ return false;
+ fseek(fp, 0, SEEK_END);
+ long size = ftell(fp);
+ if (size <= 0) {
+ fclose(fp);
+ return false;
+ }
+ shader->len = size;
+ rewind(fp);
+
+ shader->code = calloc(shader->len, sizeof(*shader->code));
+ if (fread(shader->code, sizeof(*shader->code), shader->len, fp) != shader->len) {
+ free(shader->code);
+ fclose(fp);
+ return false;
+ }
+
+ fclose(fp);
+ return true;
+}
diff --git a/src/render/util.c b/src/render/util.c
new file mode 100644
index 0000000..7a453a0
--- /dev/null
+++ b/src/render/util.c
@@ -0,0 +1 @@
+#include "render/util.h"