Diffstat (limited to 'src')
-rw-r--r--  src/buffer.c    195
-rw-r--r--  src/buffer.h     41
-rw-r--r--  src/pipeline.c  347
-rw-r--r--  src/pipeline.h   39
-rw-r--r--  src/renderer.c  645
-rw-r--r--  src/renderer.h   97
-rw-r--r--  src/utils.c      38
-rw-r--r--  src/utils.h      71
-rw-r--r--  src/window.c    177
-rw-r--r--  src/window.h     35
10 files changed, 1685 insertions, 0 deletions
diff --git a/src/buffer.c b/src/buffer.c
new file mode 100644
index 0000000..81e8ae6
--- /dev/null
+++ b/src/buffer.c
@@ -0,0 +1,195 @@
+#include "renderer.h"
+#include "buffer.h"
+
+#include <string.h>
+#include <sys/types.h> /* ssize_t */
+
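+// Scan the physical device's memory types for one that is allowed by the
+// resource's type filter and carries all of the requested property flags.
+// Returns the type index, or -1 if no suitable type exists.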
+static inline ssize_t find_memory_type(struct vlkn_renderer *ren, uint32_t filter, VkMemoryPropertyFlags props) {
+ VkPhysicalDeviceMemoryProperties mem_props;
+ vkGetPhysicalDeviceMemoryProperties(ren->phy_gpus.chosen->gpu, &mem_props);
+
+ for (size_t i = 0; i < mem_props.memoryTypeCount; i++)
+ if (filter & (1 << i) && (mem_props.memoryTypes[i].propertyFlags & props) == props)
+ return i;
+
+ return -1;
+}
+
+VkResult buffer_create(struct vlkn_renderer *ren, VkDeviceSize size,
+ VkBufferUsageFlags usage, VkMemoryPropertyFlags props, struct vlkn_buffer *buffer) {
+ VkBufferCreateInfo buffer_info = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ .size = size,
+ .usage = usage,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ };
+
+ VkResult res = vkCreateBuffer(ren->gpu.device, &buffer_info, NULL, &buffer->buffer);
+ if (res != VK_SUCCESS)
+ return res;
+
+ VkMemoryRequirements mem_reqs;
+ vkGetBufferMemoryRequirements(ren->gpu.device, buffer->buffer, &mem_reqs);
+
+ ssize_t mem_type_index = find_memory_type(ren, mem_reqs.memoryTypeBits, props);
+ if (mem_type_index == -1)
+ return VK_ERROR_UNKNOWN;
+
+ VkMemoryAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .allocationSize = mem_reqs.size,
+ .memoryTypeIndex = mem_type_index
+ };
+
+ res = vkAllocateMemory(ren->gpu.device, &alloc_info, NULL, &buffer->memory);
+ if (res != VK_SUCCESS)
+ return res;
+
+ buffer->size = size;
+ return vkBindBufferMemory(ren->gpu.device, buffer->buffer, buffer->memory, 0);
+}
+
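+// Upload host data into a (possibly device-local) buffer by staging it
+// through a temporary host-visible buffer and a one-shot transfer command.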
+VkResult buffer_upload(struct vlkn_renderer *ren, struct vlkn_buffer *buffer,
+ size_t offset, size_t size, uint8_t data[size]) {
+ struct vlkn_buffer tmp;
+ // TODO: reallocate buffer?
+ // Clamp the copy so it cannot run past the end of the destination buffer.
+ if (offset >= buffer->size)
+ return VK_ERROR_UNKNOWN;
+ size_t copy_size = size;
+ if (offset + copy_size > buffer->size)
+ copy_size = buffer->size - offset;
+ VkResult res = buffer_create(ren, copy_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &tmp);
+ if (res != VK_SUCCESS)
+ return res;
+
+ void *map;
+ res = vkMapMemory(ren->gpu.device, tmp.memory, 0, copy_size, 0, &map);
+ if (res != VK_SUCCESS)
+ return res;
+ memcpy(map, data, copy_size);
+ vkUnmapMemory(ren->gpu.device, tmp.memory);
+
+ VkCommandBuffer cmd = begin_single_command(ren);
+ vkCmdCopyBuffer(cmd, tmp.buffer, buffer->buffer, 1,
+ &(VkBufferCopy) { .dstOffset = offset, .size = copy_size });
+ end_single_command(ren, cmd);
+
+ buffer_destroy(ren, &tmp);
+
+ return VK_SUCCESS;
+}
+
+void buffer_destroy(struct vlkn_renderer *ren, struct vlkn_buffer *buffer) {
+ vkDestroyBuffer(ren->gpu.device, buffer->buffer, NULL);
+ vkFreeMemory(ren->gpu.device, buffer->memory, NULL);
+}
+
+VkResult image_view_create(struct vlkn_renderer *ren, VkFormat format,
+ VkImageAspectFlags aspect, VkImage image, VkImageView *view) {
+ VkImageViewCreateInfo view_info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = image,
+ .viewType = VK_IMAGE_VIEW_TYPE_2D,
+ .format = format,
+ .components = {
+ .r = VK_COMPONENT_SWIZZLE_IDENTITY,
+ .g = VK_COMPONENT_SWIZZLE_IDENTITY,
+ .b = VK_COMPONENT_SWIZZLE_IDENTITY,
+ .a = VK_COMPONENT_SWIZZLE_IDENTITY,
+ },
+ .subresourceRange = {
+ .aspectMask = aspect,
+ .baseMipLevel = 0,
+ .levelCount = 1,
+ .baseArrayLayer = 0,
+ .layerCount = 1
+ }
+ };
+
+ return vkCreateImageView(ren->gpu.device, &view_info, NULL, view);
+}
+
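+// Create a 2D device image from `opts`: the image object, a backing memory
+// allocation, the bind, and a matching image view.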
+VkResult image_create(struct vlkn_renderer *ren, struct image_opts opts, struct vlkn_images *image) {
+ VkImageCreateInfo image_info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ .imageType = VK_IMAGE_TYPE_2D,
+ .extent = {
+ .width = opts.extent.width,
+ .height = opts.extent.height,
+ .depth = 1
+ },
+ .mipLevels = opts.mip_level,
+ .arrayLayers = 1,
+ .format = opts.format,
+ .tiling = opts.tiling,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ .usage = opts.usage,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ .samples = VK_SAMPLE_COUNT_1_BIT
+ };
+
+ VkResult res = vkCreateImage(ren->gpu.device, &image_info, NULL, &image->image);
+ if (res != VK_SUCCESS)
+ return res;
+
+ VkMemoryRequirements mem_reqs;
+ vkGetImageMemoryRequirements(ren->gpu.device, image->image, &mem_reqs);
+
+ ssize_t mem_type_index = find_memory_type(ren, mem_reqs.memoryTypeBits, opts.mem_props);
+ if (mem_type_index == -1)
+ return VK_ERROR_UNKNOWN;
+
+ VkMemoryAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .allocationSize = mem_reqs.size,
+ .memoryTypeIndex = mem_type_index
+ };
+
+ res = vkAllocateMemory(ren->gpu.device, &alloc_info, NULL, &image->memory);
+ if (res != VK_SUCCESS)
+ return res;
+
+ res = vkBindImageMemory(ren->gpu.device, image->image, image->memory, 0);
+ if (res != VK_SUCCESS)
+ return res;
+
+ return image_view_create(ren, opts.format, opts.aspect, image->image, &image->view);
+}
+
+void image_destroy(struct vlkn_renderer *ren, struct vlkn_images *image) {
+ vkDestroyImageView(ren->gpu.device, image->view, NULL);
+ vkDestroyImage(ren->gpu.device, image->image, NULL);
+ vkFreeMemory(ren->gpu.device, image->memory, NULL);
+}
+
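+// Pack vertex and index data into one staging buffer, then copy each range
+// into its own device-local buffer; the index range starts at srcOffset
+// vertex_size within the staging buffer.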
+struct vlkn_mesh vlkn_mesh_upload(struct vlkn_renderer *renderer, size_t vertex_count, struct vlkn_vertex vertices[vertex_count],
+ size_t index_count, uint32_t indices[index_count], struct vec3 position) {
+ const VkDeviceSize vertex_size = sizeof(*vertices) * vertex_count;
+ const VkDeviceSize index_size = sizeof(*indices) * index_count;
+
+ struct vlkn_mesh mesh;
+ buffer_create(renderer, vertex_size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &mesh.vertex);
+ buffer_create(renderer, index_size, VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &mesh.index);
+
+ struct vlkn_buffer tmp;
+ buffer_create(renderer, vertex_size + index_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &tmp);
+
+ void *data;
+ vkMapMemory(renderer->gpu.device, tmp.memory, 0, vertex_size + index_size, 0, &data);
+ memcpy(data, vertices, vertex_size);
+ memcpy((char *)data + vertex_size, indices, index_size);
+ vkUnmapMemory(renderer->gpu.device, tmp.memory);
+
+ VkCommandBuffer cmd = begin_single_command(renderer);
+ vkCmdCopyBuffer(cmd, tmp.buffer, mesh.vertex.buffer, 1, &(VkBufferCopy) { .size = vertex_size });
+ vkCmdCopyBuffer(cmd, tmp.buffer, mesh.index.buffer, 1, &(VkBufferCopy) { .size = index_size, .srcOffset = vertex_size });
+ end_single_command(renderer, cmd);
+
+ buffer_destroy(renderer, &tmp);
+ mesh.index_count = index_count;
+ mesh.position = position;
+
+ return mesh;
+}
diff --git a/src/buffer.h b/src/buffer.h
new file mode 100644
index 0000000..ebf6ea5
--- /dev/null
+++ b/src/buffer.h
@@ -0,0 +1,41 @@
+#ifndef _BUFFER_H_
+#define _BUFFER_H_
+
+#include <vulkan/vulkan.h>
+#include <vlkn.h>
+
+struct vlkn_renderer;
+
+VkResult buffer_create(struct vlkn_renderer *ren, VkDeviceSize size,
+ VkBufferUsageFlags usage, VkMemoryPropertyFlags props, struct vlkn_buffer *buffer);
+VkResult buffer_upload(struct vlkn_renderer *ren, struct vlkn_buffer *buffer,
+ size_t offset, size_t size, uint8_t data[size]);
+void buffer_destroy(struct vlkn_renderer *ren, struct vlkn_buffer *buffer);
+
+struct buffer_view {
+ struct vlkn_buffer *buffer;
+ size_t offset, stride, count;
+};
+
+struct vlkn_images {
+ VkImage image;
+ VkDeviceMemory memory;
+ VkImageView view;
+};
+
+struct image_opts {
+ VkExtent2D extent;
+ uint32_t mip_level;
+ VkFormat format;
+ VkImageTiling tiling;
+ VkImageUsageFlags usage;
+ VkMemoryPropertyFlags mem_props;
+ VkImageAspectFlags aspect;
+};
+
+VkResult image_view_create(struct vlkn_renderer *ren, VkFormat format,
+ VkImageAspectFlags aspect, VkImage image, VkImageView *view);
+VkResult image_create(struct vlkn_renderer *ren, struct image_opts opts, struct vlkn_images *image);
+void image_destroy(struct vlkn_renderer *ren, struct vlkn_images *image);
+
+#endif
diff --git a/src/pipeline.c b/src/pipeline.c
new file mode 100644
index 0000000..f06ba7b
--- /dev/null
+++ b/src/pipeline.c
@@ -0,0 +1,347 @@
+#include <vlkn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "pipeline.h"
+#include "utils.h"
+#include "renderer.h"
+
+struct descriptor_allocator descriptor_init(uint32_t sets, size_t ratio_count, struct pool_ratio ratios[ratio_count]) {
+ struct descriptor_allocator alloc = {
+ .set_count = sets,
+ .ratio_count = ratio_count,
+ .ratios = calloc(ratio_count, sizeof(*ratios))
+ };
+ if (!alloc.ratios)
+ abort();
+ memcpy(alloc.ratios, ratios, sizeof(*ratios) * ratio_count);
+ array_init(alloc.ready);
+ array_init(alloc.full);
+ return alloc;
+}
+
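+// Create a descriptor pool whose per-type sizes are the allocator's ratios
+// scaled by its current set count.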
+static VkDescriptorPool create_pool(VkDevice dev, struct descriptor_allocator *allocator) {
+ VkDescriptorPoolSize pool_sizes[allocator->ratio_count];
+ for (size_t i = 0; i < allocator->ratio_count; i++) {
+ pool_sizes[i] = (VkDescriptorPoolSize) {
+ .type = allocator->ratios[i].type,
+ .descriptorCount = allocator->ratios[i].ratio * allocator->set_count
+ };
+ }
+
+ VkDescriptorPoolCreateInfo pool_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+ .maxSets = allocator->set_count,
+ .poolSizeCount = allocator->ratio_count,
+ .pPoolSizes = pool_sizes
+ };
+
+ VkDescriptorPool new_pool;
+ // FIXME: verify result
+ vkCreateDescriptorPool(dev, &pool_info, NULL, &new_pool);
+ return new_pool;
+}
+
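+// Pop a pool that still has room if one is available; otherwise create a
+// fresh one and grow the per-pool set count by 1.5x, capped at 4092 sets.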
+static VkDescriptorPool get_pool(VkDevice dev, struct descriptor_allocator *allocator) {
+ if (allocator->ready.len > 0) {
+ assert(allocator->ready.data);
+ return allocator->ready.data[--allocator->ready.len];
+ }
+
+ VkDescriptorPool pool = create_pool(dev, allocator);
+ allocator->set_count *= 1.5;
+ if (allocator->set_count > 4092)
+ allocator->set_count = 4092;
+
+ return pool;
+}
+
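+// Allocate one set from the current pool; when that pool is exhausted or
+// fragmented it is parked in `full` and the allocation is retried once
+// from a fresh pool.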
+VkDescriptorSet descriptor_allocate(struct descriptor_allocator *alloc, VkDevice dev, VkDescriptorSetLayout layout) {
+ VkDescriptorPool pool = get_pool(dev, alloc);
+
+ VkDescriptorSetAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ .descriptorPool = pool,
+ .descriptorSetCount = 1,
+ .pSetLayouts = &layout
+ };
+
+ VkDescriptorSet set;
+ switch (vkAllocateDescriptorSets(dev, &alloc_info, &set)) {
+ case VK_ERROR_OUT_OF_POOL_MEMORY:
+ case VK_ERROR_FRAGMENTED_POOL:
+ array_append(alloc->full, pool);
+ pool = get_pool(dev, alloc);
+ alloc_info.descriptorPool = pool;
+ // FIXME: check properly
+ if (vkAllocateDescriptorSets(dev, &alloc_info, &set) != VK_SUCCESS)
+ abort();
+ break;
+ case VK_SUCCESS:
+ break;
+ default:
+ abort();
+ }
+
+ array_append(alloc->ready, pool);
+ return set;
+}
+
+void descriptor_destroy(struct descriptor_allocator *alloc, VkDevice dev) {
+ for (size_t i = 0; i < alloc->full.len; i++)
+ vkDestroyDescriptorPool(dev, alloc->full.data[i], NULL);
+ for (size_t i = 0; i < alloc->ready.len; i++)
+ vkDestroyDescriptorPool(dev, alloc->ready.data[i], NULL);
+ alloc->full.len = alloc->ready.len = 0;
+}
+
+// ---
+
+struct descriptor_writer descriptor_write_buffer(size_t binding,
+ VkBuffer buffer, size_t size, size_t offset, VkDescriptorType type) {
+ struct descriptor_writer writer = {
+ .type = DESCRIPTOR_WRITER_BUFFER,
+ .buffer_info = {
+ .buffer = buffer,
+ .offset = offset,
+ .range = size
+ },
+ .write_info = {
+ .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ .dstBinding = binding,
+ .descriptorCount = 1,
+ .descriptorType = type,
+ }
+ };
+ return writer;
+}
+
+struct descriptor_writer descriptor_write_image(size_t binding,
+ VkImageView view, VkSampler sampler, VkImageLayout layout, VkDescriptorType type) {
+ struct descriptor_writer writer = {
+ .type = DESCRIPTOR_WRITER_IMAGE,
+ .image_info = {
+ .sampler = sampler,
+ .imageView = view,
+ .imageLayout = layout
+ },
+ .write_info = {
+ .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ .dstBinding = binding,
+ .descriptorCount = 1,
+ .descriptorType = type,
+ }
+ };
+ return writer;
+}
+
+// TODO: benchmark this against a vkguide-like solution
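+// Copies each writer's VkWriteDescriptorSet and points it at the union
+// stored beside it, so the buffer/image info stays valid for the duration
+// of the vkUpdateDescriptorSets call.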
+void descriptor_update(VkDevice dev, VkDescriptorSet set,
+ size_t write_count, struct descriptor_writer write_sets[write_count]) {
+ VkWriteDescriptorSet writers[write_count];
+ for (size_t i = 0; i < write_count; i++) {
+ writers[i] = write_sets[i].write_info;
+ writers[i].dstSet = set;
+ switch(write_sets[i].type) {
+ case DESCRIPTOR_WRITER_BUFFER:
+ writers[i].pBufferInfo = &write_sets[i].buffer_info;
+ break;
+ case DESCRIPTOR_WRITER_IMAGE:
+ writers[i].pImageInfo = &write_sets[i].image_info;
+ break;
+ }
+ }
+ vkUpdateDescriptorSets(dev, write_count, writers, 0, NULL);
+}
+
+struct vlkn_pipeline *vlkn_pipeline_init(struct vlkn_renderer *renderer, struct vlkn_pipeline_opts *opts) {
+ struct vlkn_pipeline *pipeline = calloc(1, sizeof(*pipeline));
+ if (!pipeline)
+ return NULL;
+
+ struct pool_ratio ratios[opts->descriptors_len];
+
+ VkPipelineShaderStageCreateInfo shaders[opts->shader_len];
+ for (size_t i = 0; i < opts->shader_len; i++) {
+ shaders[i] = (VkPipelineShaderStageCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .module = opts->shaders[i].module,
+ .stage = opts->shaders[i].stages,
+ .pName = "main"
+ };
+ }
+
+ VkPipelineVertexInputStateCreateInfo vertex_input = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ .vertexBindingDescriptionCount = 1,
+ .pVertexBindingDescriptions = &opts->attrs_desc,
+ .vertexAttributeDescriptionCount = opts->attrs_len,
+ .pVertexAttributeDescriptions = opts->attrs
+ };
+
+ VkPipelineInputAssemblyStateCreateInfo input_assembly = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ .primitiveRestartEnable = VK_FALSE
+ };
+
+ VkDynamicState dyn_states[] = {
+ VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR
+ };
+
+ VkPipelineDynamicStateCreateInfo dynamic_state_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ .dynamicStateCount = 2,
+ .pDynamicStates = dyn_states
+ };
+
+ VkPipelineViewportStateCreateInfo viewport_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ .viewportCount = 1,
+ .scissorCount = 1
+ };
+
+ VkPipelineRasterizationStateCreateInfo rasterizer = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+ .depthClampEnable = VK_FALSE,
+ .rasterizerDiscardEnable = VK_FALSE,
+ .polygonMode = VK_POLYGON_MODE_FILL,
+ .lineWidth = 1.0f,
+ .cullMode = VK_CULL_MODE_BACK_BIT,
+ .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+ .depthBiasEnable = VK_FALSE,
+ };
+
+ VkPipelineMultisampleStateCreateInfo multisampling = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ .sampleShadingEnable = VK_FALSE,
+ .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT,
+ .minSampleShading = 1.0f,
+ };
+
+ VkPipelineColorBlendAttachmentState color_state = {
+ .colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
+ .blendEnable = VK_FALSE
+ };
+
+ VkPipelineColorBlendStateCreateInfo color_blending = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ .logicOpEnable = VK_FALSE,
+ .attachmentCount = 1,
+ .pAttachments = &color_state
+ };
+
+ VkPipelineDepthStencilStateCreateInfo depth_stencil = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+ .depthTestEnable = VK_TRUE,
+ .depthWriteEnable = VK_TRUE,
+ .depthCompareOp = VK_COMPARE_OP_LESS,
+ .depthBoundsTestEnable = VK_FALSE,
+ .minDepthBounds = 0.0f,
+ .maxDepthBounds = 1.0,
+ .stencilTestEnable = VK_FALSE,
+ };
+
+ VkDescriptorSetLayoutCreateInfo desc_create_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ .bindingCount = opts->descriptors_len,
+ .pBindings = opts->descriptors
+ };
+
+ if (!vlkn_check(vkCreateDescriptorSetLayout(renderer->gpu.device, &desc_create_info, NULL, &pipeline->descriptor_layout)))
+ goto err;
+
+ for (size_t i = 0; i < opts->descriptors_len; i++) {
+ ratios[i].type = opts->descriptors[i].descriptorType;
+ ratios[i].ratio = 1;
+ }
+
+ pipeline->allocator = descriptor_init(1, opts->descriptors_len, ratios);
+
+ VkPipelineLayoutCreateInfo pipeline_layout_create_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ .setLayoutCount = 1,
+ .pSetLayouts = &pipeline->descriptor_layout,
+ .pushConstantRangeCount = opts->push_contant_len,
+ .pPushConstantRanges = opts->push_constants
+ };
+
+ if (!vlkn_check(vkCreatePipelineLayout(renderer->gpu.device, &pipeline_layout_create_info, NULL, &pipeline->layout)))
+ goto err;
+
+ VkGraphicsPipelineCreateInfo pipeline_info = {
+ .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ .stageCount = opts->shader_len,
+ .pStages = shaders,
+ .pVertexInputState = &vertex_input,
+ .pInputAssemblyState = &input_assembly,
+ .pViewportState = &viewport_state,
+ .pRasterizationState = &rasterizer,
+ .pMultisampleState = &multisampling,
+ .pColorBlendState = &color_blending,
+ .pDynamicState = &dynamic_state_info,
+ .pDepthStencilState = &depth_stencil,
+ .layout = pipeline->layout,
+ .renderPass = renderer->render_pass,
+ .subpass = 0,
+ .basePipelineHandle = VK_NULL_HANDLE,
+ .basePipelineIndex = -1
+ };
+
+ if (!vlkn_check(vkCreateGraphicsPipelines(renderer->gpu.device, VK_NULL_HANDLE, 1, &pipeline_info, NULL, &pipeline->pipeline)))
+ goto err;
+
+ goto out;
+
+err:
+ vkDestroyDescriptorSetLayout(renderer->gpu.device, pipeline->descriptor_layout, NULL);
+ vkDestroyPipelineLayout(renderer->gpu.device, pipeline->layout, NULL);
+ pipeline = (free(pipeline), NULL);
+out:
+ return pipeline;
+}
+
+bool vlkn_shader_load(struct vlkn_renderer *renderer,
+ const char *path, VkShaderStageFlagBits stage, struct vlkn_shader *shader) {
+ if (!path) {
+ dbglog("attempted to load shader with path as null");
+ return false;
+ }
+
+ FILE *fp = fopen(path, "rb");
+ if (!fp) {
+ dbglogf("failed to load pipeline %s.", path);
+ return false;
+ }
+
+ fseek(fp, 0, SEEK_END);
+ size_t len = ftell(fp);
+ rewind(fp);
+
+ uint8_t *bytes = calloc(len, sizeof(*bytes));
+ if (!bytes || fread(bytes, sizeof(*bytes), len, fp) != len) {
+ dbglogf("failed to read shader %s.", path);
+ free(bytes);
+ fclose(fp);
+ return false;
+ }
+ fclose(fp);
+
+ VkShaderModuleCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+ .codeSize = len,
+ .pCode = (uint32_t *)bytes
+ };
+
+ bool ret = true;
+ if (!vlkn_check(vkCreateShaderModule(renderer->gpu.device, &create_info, NULL, &shader->module))) {
+ ret = false;
+ }
+
+ shader->stages = stage;
+
+ free(bytes);
+ return ret;
+}
+
+void vlkn_shader_unload(struct vlkn_renderer *renderer, struct vlkn_shader *shader) {
+ vkDestroyShaderModule(renderer->gpu.device, shader->module, NULL);
+}
diff --git a/src/pipeline.h b/src/pipeline.h
new file mode 100644
index 0000000..e571d68
--- /dev/null
+++ b/src/pipeline.h
@@ -0,0 +1,39 @@
+#ifndef _PIPELINE_H_
+#define _PIPELINE_H_
+
+#include <vlkn.h>
+#include "utils.h"
+
+struct pool_ratio {
+ VkDescriptorType type;
+ float ratio;
+};
+
+struct descriptor_allocator {
+ struct array(VkDescriptorPool) full, ready;
+ uint32_t set_count;
+ size_t ratio_count;
+ struct pool_ratio *ratios;
+};
+
+struct descriptor_writer {
+ enum {
+ DESCRIPTOR_WRITER_BUFFER,
+ DESCRIPTOR_WRITER_IMAGE,
+ } type;
+ union {
+ VkDescriptorBufferInfo buffer_info;
+ VkDescriptorImageInfo image_info;
+ };
+ VkWriteDescriptorSet write_info;
+};
+
+struct vlkn_pipeline {
+ struct descriptor_allocator allocator;
+ VkDescriptorSetLayout descriptor_layout;
+ VkDescriptorSet descriptor_set;
+ VkPipelineLayout layout;
+ VkPipeline pipeline;
+};
+
+#endif
diff --git a/src/renderer.c b/src/renderer.c
new file mode 100644
index 0000000..078408d
--- /dev/null
+++ b/src/renderer.c
@@ -0,0 +1,645 @@
+#include "renderer.h"
+#include "window.h"
+#include "buffer.h"
+#include "pipeline.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h> /* ssize_t */
+
+static VkDebugUtilsMessengerEXT debug_messenger;
+
+static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
+ VkDebugUtilsMessageTypeFlagsEXT type, const VkDebugUtilsMessengerCallbackDataEXT *callback_data, void *data) {
+ (void) severity; (void) type; (void) data;
+ dbglogf("%s", callback_data->pMessage);
+ return VK_FALSE;
+}
+
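+// Collect every physical device that can drive our surface: it must offer
+// fragmentStoresAndAtomics, the swapchain extension, graphics and present
+// queue families, and at least one surface format and present mode.
+// The first discrete GPU found is preferred, else the first usable one.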
+static bool enumerate_phygpus(struct vlkn_renderer *renderer) {
+ vkEnumeratePhysicalDevices(renderer->instance, &renderer->phy_gpus.cap, NULL);
+ if (renderer->phy_gpus.cap == 0)
+ return false;
+
+ renderer->phy_gpus.gpus = calloc(renderer->phy_gpus.cap, sizeof(*renderer->phy_gpus.gpus));
+ VkPhysicalDevice gpus[renderer->phy_gpus.cap];
+ vkEnumeratePhysicalDevices(renderer->instance, &renderer->phy_gpus.cap, gpus);
+
+ for (uint32_t i = 0; i < renderer->phy_gpus.cap; i++) {
+ uint32_t count;
+
+ vkEnumerateDeviceExtensionProperties(gpus[i], NULL, &count, NULL);
+ VkExtensionProperties ext_props[count];
+ vkEnumerateDeviceExtensionProperties(gpus[i], NULL, &count, ext_props);
+
+ VkPhysicalDeviceFeatures feats;
+ vkGetPhysicalDeviceFeatures(gpus[i], &feats);
+ if (!feats.fragmentStoresAndAtomics) {
+ dbglog("no atomic store");
+ continue;
+ }
+
+ bool swapchain = false;
+ for (size_t e = 0; e < count; e++)
+ if (strcmp(ext_props[e].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0) {
+ swapchain = true;
+ break;
+ }
+
+ if (!swapchain)
+ continue;
+
+ vkGetPhysicalDeviceQueueFamilyProperties(gpus[i], &count, NULL);
+ VkQueueFamilyProperties queue_props[count];
+ vkGetPhysicalDeviceQueueFamilyProperties(gpus[i], &count, queue_props);
+
+ ssize_t gfx_queue = -1, present_queue = -1;
+
+ for (size_t q = 0; q < count; q++) {
+ if ((queue_props[q].queueFlags & VK_QUEUE_GRAPHICS_BIT))
+ gfx_queue = q;
+
+ VkBool32 present_support = false;
+ vkGetPhysicalDeviceSurfaceSupportKHR(gpus[i], q, renderer->surface, &present_support);
+
+ if (present_support)
+ present_queue = q;
+ }
+
+ if (gfx_queue == -1 || present_queue == -1)
+ continue;
+
+ struct surface_caps caps;
+
+ vkGetPhysicalDeviceSurfaceFormatsKHR(gpus[i], renderer->surface, &caps.formats.len, NULL);
+ if (caps.formats.len == 0)
+ continue; // skip this GPU rather than failing enumeration outright
+ vkGetPhysicalDeviceSurfacePresentModesKHR(gpus[i], renderer->surface, &caps.present_modes.len, NULL);
+ if (caps.present_modes.len == 0)
+ continue;
+
+ vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpus[i], renderer->surface, &caps.caps);
+ vkGetPhysicalDeviceSurfaceFormatsKHR(gpus[i], renderer->surface,
+ &caps.formats.len, (caps.formats.data = calloc(caps.formats.len, sizeof(*caps.formats.data))));
+ vkGetPhysicalDeviceSurfacePresentModesKHR(gpus[i], renderer->surface,
+ &caps.present_modes.len, (caps.present_modes.data = calloc(caps.present_modes.len, sizeof(*caps.present_modes.data))));
+
+ renderer->phy_gpus.gpus[renderer->phy_gpus.len++] = (struct phy_gpu) {
+ .gpu = gpus[i],
+ .graphics_queue = gfx_queue,
+ .present_queue = present_queue,
+ .surface_caps = caps
+ };
+
+ if (!renderer->phy_gpus.chosen) {
+ VkPhysicalDeviceProperties props;
+ vkGetPhysicalDeviceProperties(gpus[i], &props);
+ if (props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU)
+ renderer->phy_gpus.chosen = &renderer->phy_gpus.gpus[renderer->phy_gpus.len - 1];
+ }
+ }
+
+ if (renderer->phy_gpus.len == 0)
+ return false;
+
+ if (!renderer->phy_gpus.chosen)
+ renderer->phy_gpus.chosen = &renderer->phy_gpus.gpus[0];
+
+ return true;
+}
+
+static VkResult create_device(struct vlkn_renderer *ren) {
+ float queue_prio = 1.0f;
+ VkDeviceQueueCreateInfo queue_infos[] = {
+ {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+ .queueFamilyIndex = ren->phy_gpus.chosen->graphics_queue,
+ .queueCount = 1,
+ .pQueuePriorities = &queue_prio
+ },
+ {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+ .queueFamilyIndex = ren->phy_gpus.chosen->present_queue,
+ .queueCount = 1,
+ .pQueuePriorities = &queue_prio
+ }
+ };
+
+ // Vulkan forbids duplicate queue families in pQueueCreateInfos, so pass a
+ // single entry when graphics and present share a family.
+ uint32_t queue_info_count = ren->phy_gpus.chosen->graphics_queue ==
+ ren->phy_gpus.chosen->present_queue ? 1 : len(queue_infos);
+
+ const char * const ext = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+
+ VkPhysicalDeviceFeatures feats = { .fragmentStoresAndAtomics = VK_TRUE };
+ VkPhysicalDeviceVulkan12Features feats12 = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
+ .runtimeDescriptorArray = VK_TRUE
+ };
+
+ VkDeviceCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ .pNext = &feats12,
+ .pQueueCreateInfos = queue_infos,
+ .queueCreateInfoCount = queue_info_count,
+ .enabledExtensionCount = 1,
+ .ppEnabledExtensionNames = &ext,
+ .pEnabledFeatures = &feats
+ };
+
+ VkResult res = vkCreateDevice(ren->phy_gpus.chosen->gpu, &create_info, NULL, &ren->gpu.device);
+ if (res != VK_SUCCESS)
+ return res;
+
+ vkGetDeviceQueue(ren->gpu.device, ren->phy_gpus.chosen->graphics_queue, 0, &ren->gpu.gfx_queue);
+ vkGetDeviceQueue(ren->gpu.device, ren->phy_gpus.chosen->present_queue, 0, &ren->gpu.present_queue);
+
+ return VK_SUCCESS;
+}
+
+static VkSurfaceFormatKHR pick_swapchain_format(const struct surface_caps *caps) {
+ assert(caps && caps->formats.data && caps->formats.len > 0);
+ for (size_t i = 0; i < caps->formats.len; i++) {
+ VkSurfaceFormatKHR format = caps->formats.data[i];
+ if (format.format == VK_FORMAT_B8G8R8A8_SRGB && format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR)
+ return format;
+ }
+ return caps->formats.data[0];
+}
+
+static VkPresentModeKHR pick_present_mode(const struct surface_caps *caps) {
+ assert(caps && caps->present_modes.data);
+ for (size_t i = 0; i < caps->present_modes.len; i++) {
+ VkPresentModeKHR mode = caps->present_modes.data[i];
+ if (mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
+ return mode;
+ }
+ return VK_PRESENT_MODE_FIFO_KHR;
+}
+
+static VkResult create_renderpass(struct vlkn_renderer *ren) {
+ VkAttachmentDescription color_attach = {
+ .format = ren->swapchain.format,
+ .samples = VK_SAMPLE_COUNT_1_BIT,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
+ };
+
+ VkAttachmentReference color_attach_ref = {
+ .attachment = 0,
+ .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ };
+
+ VkAttachmentDescription depth_attach = {
+ .format = VK_FORMAT_D32_SFLOAT,
+ .samples = VK_SAMPLE_COUNT_1_BIT,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
+ .storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ .finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
+ };
+
+ VkAttachmentReference depth_attach_ref = {
+ .attachment = 1,
+ .layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
+ };
+
+ VkSubpassDescription subpasses[] = {
+ {
+ .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ .colorAttachmentCount = 1,
+ .pColorAttachments = &color_attach_ref,
+ .pDepthStencilAttachment = &depth_attach_ref
+ }
+ };
+
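+ // Order this subpass's color/depth writes after any earlier writes to
+ // the same attachments (the depth image is reused across frames).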
+ VkSubpassDependency dep = {
+ .srcSubpass = VK_SUBPASS_EXTERNAL,
+ .dstSubpass = 0,
+ .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
+ .srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
+ .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
+ .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
+ };
+
+ VkAttachmentDescription attachs[] = { color_attach, depth_attach };
+
+ VkRenderPassCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ .attachmentCount = len(attachs),
+ .pAttachments = attachs,
+ .subpassCount = len(subpasses),
+ .pSubpasses = subpasses,
+ .dependencyCount = 1,
+ .pDependencies = &dep
+ };
+
+ return vkCreateRenderPass(ren->gpu.device, &create_info, NULL, &ren->render_pass);
+}
+
+void swapchain_destroy(struct vlkn_renderer *ren);
+static VkResult create_swapchain(struct vlkn_renderer *ren) {
+ struct surface_caps *caps = &ren->phy_gpus.chosen->surface_caps;
+ VkSurfaceFormatKHR format = pick_swapchain_format(caps);
+ ren->swapchain.format = format.format;
+ ren->swapchain.extent = caps->caps.currentExtent.width != UINT32_MAX ? caps->caps.currentExtent :
+ (VkExtent2D) {
+ .width = ren->win->width,
+ .height = ren->win->height
+ };
+
+ uint32_t image_count = caps->caps.minImageCount + 1;
+ if (caps->caps.maxImageCount > 0 && image_count > caps->caps.maxImageCount)
+ image_count = caps->caps.maxImageCount;
+
+ // TODO: understand those values
+ VkSwapchainCreateInfoKHR create_info = {
+ .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ .surface = ren->surface,
+ .minImageCount = image_count,
+ .imageFormat = ren->swapchain.format,
+ .imageColorSpace = format.colorSpace,
+ .imageExtent = ren->swapchain.extent,
+ .imageArrayLayers = 1,
+ .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+ .preTransform = caps->caps.currentTransform,
+ .compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
+ .presentMode = pick_present_mode(caps),
+ .clipped = VK_TRUE,
+ };
+
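+ // Swapchain images only need cross-family sharing when the graphics and
+ // present queues live in different families.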
+ uint32_t queue_families[] = { ren->phy_gpus.chosen->present_queue, ren->phy_gpus.chosen->graphics_queue };
+ if (queue_families[0] != queue_families[1]) {
+ create_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
+ create_info.queueFamilyIndexCount = 2;
+ create_info.pQueueFamilyIndices = queue_families;
+ } else {
+ create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ }
+
+ VkResult res = vkCreateSwapchainKHR(ren->gpu.device, &create_info, NULL, &ren->swapchain.swapchain);
+ if (res != VK_SUCCESS)
+ return res;
+
+ if (ren->render_pass == VK_NULL_HANDLE) {
+ res = create_renderpass(ren);
+ if (res != VK_SUCCESS)
+ return res;
+ }
+
+ vkGetSwapchainImagesKHR(ren->gpu.device, ren->swapchain.swapchain, &ren->swapchain.images.len, NULL);
+ ren->swapchain.images.data = calloc(ren->swapchain.images.len, sizeof(*ren->swapchain.images.data));
+ VkImage images[ren->swapchain.images.len];
+ vkGetSwapchainImagesKHR(ren->gpu.device, ren->swapchain.swapchain, &ren->swapchain.images.len, images);
+
+ struct image_opts opts = {
+ .extent = ren->swapchain.extent,
+ .mip_level = 1,
+ .format = VK_FORMAT_D32_SFLOAT,
+ .tiling = VK_IMAGE_TILING_OPTIMAL,
+ .usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
+ .mem_props = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+ .aspect = VK_IMAGE_ASPECT_DEPTH_BIT
+ };
+
+ res = image_create(ren, opts, &ren->depth);
+ if (res != VK_SUCCESS)
+ return res;
+
+ for (size_t i = 0; i < ren->swapchain.images.len; i++) {
+ ren->swapchain.images.data[i].image = images[i];
+ res = image_view_create(ren, ren->swapchain.format,
+ VK_IMAGE_ASPECT_COLOR_BIT, images[i], &ren->swapchain.images.data[i].view);
+ if (res != VK_SUCCESS)
+ return res;
+
+ VkImageView attachs[] = { ren->swapchain.images.data[i].view, ren->depth.view };
+ VkFramebufferCreateInfo fb_info = {
+ .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ .renderPass = ren->render_pass,
+ .attachmentCount = len(attachs),
+ .pAttachments = attachs,
+ .width = ren->swapchain.extent.width,
+ .height = ren->swapchain.extent.height,
+ .layers = 1
+ };
+ res = vkCreateFramebuffer(ren->gpu.device, &fb_info, NULL, &ren->swapchain.images.data[i].framebuffer);
+ if (res != VK_SUCCESS)
+ return res;
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult create_sync_objects(struct vlkn_renderer *ren) {
+ VkSemaphoreCreateInfo semaphore_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO
+ };
+ VkFenceCreateInfo fence_info = {
+ .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ .flags = VK_FENCE_CREATE_SIGNALED_BIT
+ };
+ VkResult res;
+#define X(exp) do { res = exp; if (res != VK_SUCCESS) return res; } while (0)
+ for (size_t i = 0; i < MAX_FRAMES; i++) {
+ X(vkCreateSemaphore(ren->gpu.device, &semaphore_info, NULL, &ren->locks.image_available[i]));
+ X(vkCreateSemaphore(ren->gpu.device, &semaphore_info, NULL, &ren->locks.render_finished[i]));
+ X(vkCreateFence(ren->gpu.device, &fence_info, NULL, &ren->locks.in_flight[i]));
+ }
+#undef X
+ return VK_SUCCESS;
+}
+
+static VkResult create_command_pool(struct vlkn_renderer *ren) {
+ VkCommandPoolCreateInfo pool_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
+ .queueFamilyIndex = ren->phy_gpus.chosen->graphics_queue
+ };
+
+ VkResult res = vkCreateCommandPool(ren->gpu.device, &pool_info, NULL, &ren->command.pool);
+ if (res != VK_SUCCESS)
+ return res;
+
+ VkCommandBufferAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ .commandPool = ren->command.pool,
+ .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ .commandBufferCount = MAX_FRAMES
+ };
+
+ return vkAllocateCommandBuffers(ren->gpu.device, &alloc_info, ren->command.buffers);
+}
+
+void resize_callback(void *data) {
+ struct vlkn_renderer *ren = data;
+
+ if (ren->win->width == 0 || ren->win->height == 0)
+ return;
+ vkDeviceWaitIdle(ren->gpu.device);
+ swapchain_destroy(ren);
+ create_swapchain(ren);
+}
+
+struct vlkn_renderer *vlkn_renderer_init(struct vlkn_window *win) {
+ struct vlkn_renderer *ren = calloc(1, sizeof(*ren));
+ if (!ren)
+ return NULL;
+
+ ren->win = win;
+
+ VkApplicationInfo app_info = {
+ .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ .pApplicationName = win->title ? win->title : "",
+ .applicationVersion = VK_MAKE_VERSION(1, 0, 0),
+ .pEngineName = "void",
+ .engineVersion = VK_MAKE_VERSION(1, 0, 0),
+ .apiVersion = VK_API_VERSION_1_3
+ };
+
+ // TODO: query window
+ const char *exts[] = {
+ VK_KHR_SURFACE_EXTENSION_NAME,
+ VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
+ VK_EXT_DEBUG_UTILS_EXTENSION_NAME,
+ };
+
+ const char *validation_layers[] = { "VK_LAYER_KHRONOS_validation" };
+
+ VkInstanceCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ .pApplicationInfo = &app_info,
+ .enabledExtensionCount = len(exts),
+ .ppEnabledExtensionNames = exts,
+ .enabledLayerCount = 1,
+ .ppEnabledLayerNames = validation_layers
+ };
+
+ if (!vlkn_check(vkCreateInstance(&create_info, NULL, &ren->instance)))
+ goto err;
+
+ VkDebugUtilsMessengerCreateInfoEXT debug_create_info = {
+ .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
+ .messageSeverity = //VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT
+ | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
+ .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT
+ | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT
+ | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,
+ .pfnUserCallback = debug_callback
+ };
+ ((PFN_vkCreateDebugUtilsMessengerEXT)
+ vkGetInstanceProcAddr(ren->instance, "vkCreateDebugUtilsMessengerEXT"))(ren->instance, &debug_create_info, NULL, &debug_messenger);
+
+ ren->surface = window_create_vk_surface(ren->instance, win);
+ if (ren->surface == VK_NULL_HANDLE) {
+ dbglog("failed to create window surface");
+ goto err;
+ }
+
+ if (!enumerate_phygpus(ren)) {
+ dbglog("failed to enumerate physical gpus");
+ goto err;
+ }
+
+ if (!vlkn_check(create_device(ren)))
+ goto err;
+
+ if (!vlkn_check(create_swapchain(ren)))
+ goto err;
+
+ if (!vlkn_check(create_sync_objects(ren)))
+ goto err;
+
+ if (!vlkn_check(create_command_pool(ren)))
+ goto err;
+
+ window_on_resize(win, resize_callback, ren);
+ goto out;
+err:
+ ren = (vlkn_renderer_destroy(ren), NULL);
+out:
+ return ren;
+}
+
+void swapchain_destroy(struct vlkn_renderer *ren) {
+ image_destroy(ren, &ren->depth);
+
+ for (size_t i = 0; i < ren->swapchain.images.len; i++) {
+ vkDestroyFramebuffer(ren->gpu.device, ren->swapchain.images.data[i].framebuffer, NULL);
+ vkDestroyImageView(ren->gpu.device, ren->swapchain.images.data[i].view, NULL);
+ }
+ free(ren->swapchain.images.data);
+ vkDestroySwapchainKHR(ren->gpu.device, ren->swapchain.swapchain, NULL);
+}
+
+void vlkn_renderer_destroy(struct vlkn_renderer *renderer) {
+ if (!renderer)
+ return;
+ if (renderer->gpu.device) {
+ vkDeviceWaitIdle(renderer->gpu.device);
+ for (size_t i = 0; i < MAX_FRAMES; i++) {
+ vkDestroySemaphore(renderer->gpu.device, renderer->locks.image_available[i], NULL);
+ vkDestroySemaphore(renderer->gpu.device, renderer->locks.render_finished[i], NULL);
+ vkDestroyFence(renderer->gpu.device, renderer->locks.in_flight[i], NULL);
+ }
+
+ swapchain_destroy(renderer);
+
+ vkDestroyCommandPool(renderer->gpu.device, renderer->command.pool, NULL);
+
+ vkDestroyPipeline(renderer->gpu.device, renderer->pipeline.gfx, NULL);
+ vkDestroyPipeline(renderer->gpu.device, renderer->pipeline.blend, NULL);
+ vkDestroyPipelineLayout(renderer->gpu.device, renderer->pipeline.layout, NULL);
+
+ vkDestroyRenderPass(renderer->gpu.device, renderer->render_pass, NULL);
+
+ vkDestroyDevice(renderer->gpu.device, NULL);
+ }
+
+ if (renderer->instance) {
+ vkDestroySurfaceKHR(renderer->instance, renderer->surface, NULL);
+
+ ((PFN_vkDestroyDebugUtilsMessengerEXT)
+ vkGetInstanceProcAddr(renderer->instance, "vkDestroyDebugUtilsMessengerEXT"))(renderer->instance, debug_messenger, NULL);
+ vkDestroyInstance(renderer->instance, NULL);
+ }
+
+ free(renderer);
+}
+
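+// Index of the swapchain image acquired in vlkn_render; vlkn_present
+// consumes it when queueing the present.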
+uint32_t idx;
+void vlkn_render(struct vlkn_renderer *renderer) {
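+ // Frame pacing is still WIP: everything records into frame 0 for now
+ // (see the commented-out current_frame rotation in vlkn_present).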
+ size_t frame = 0;
+ vkWaitForFences(renderer->gpu.device, 1, &renderer->locks.in_flight[frame], VK_TRUE, UINT64_MAX);
+ vkResetFences(renderer->gpu.device, 1, &renderer->locks.in_flight[frame]);
+
+ VkResult res = vkAcquireNextImageKHR(renderer->gpu.device, renderer->swapchain.swapchain,
+ UINT64_MAX, renderer->locks.image_available[frame], VK_NULL_HANDLE, &idx);
+ switch (res) {
+ case VK_SUCCESS:
+ case VK_SUBOPTIMAL_KHR:
+ break;
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ resize_callback(renderer);
+ return;
+ default:
+ dbglog("failed to aquire swapchain images");
+ abort();
+ }
+
+
+ VkCommandBuffer buffer = renderer->command.buffers[frame];
+ vkResetCommandBuffer(renderer->command.buffers[frame], 0);
+
+ VkCommandBufferBeginInfo begin_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ };
+
+ if (!vlkn_check(vkBeginCommandBuffer(buffer, &begin_info)))
+ return;
+
+ VkClearValue clear_color[] = {
+ { .color = {{ 0.0f, 0.0f, 0.0f, 1.0f }} },
+ { .depthStencil = { 1.0f, 0.0f }}
+ };
+ VkRenderPassBeginInfo render_pass_info = {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+ .renderPass = renderer->render_pass,
+ .framebuffer = renderer->swapchain.images.data[idx].framebuffer,
+ .renderArea = {
+ .extent = renderer->swapchain.extent,
+ .offset = {0, 0}
+ },
+ .clearValueCount = len(clear_color),
+ .pClearValues = clear_color
+ };
+
+ vkCmdBeginRenderPass(buffer, &render_pass_info, VK_SUBPASS_CONTENTS_INLINE);
+
+ VkViewport viewport = {
+ .x = 0.0f,
+ .y = 0.0f,
+ .width = renderer->swapchain.extent.width,
+ .height = renderer->swapchain.extent.height,
+ .minDepth = 0.0f,
+ .maxDepth = 1.0f,
+ };
+ vkCmdSetViewport(buffer, 0, 1, &viewport);
+
+ VkRect2D scissor = {
+ .offset = {0, 0},
+ .extent = renderer->swapchain.extent
+ };
+
+ vkCmdSetScissor(buffer, 0, 1, &scissor);
+}
+
+void vlkn_draw(struct vlkn_renderer *renderer, struct vlkn_pipeline *pipeline,
+ size_t mesh_len, struct vlkn_mesh meshes[mesh_len]) {
+ size_t frame = 0;
+ VkCommandBuffer buffer = renderer->command.buffers[frame];
+
+ vkCmdBindPipeline(buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline->pipeline);
+
+ /*
+ vkCmdBindDescriptorSets(buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline->layout, 0, 1, &desc_sets[current_frame], 0, NULL);
+ */
+
+ for (size_t i = 0; i < mesh_len; i++) {
+ vkCmdBindVertexBuffers(buffer, 0, 1, &meshes[i].vertex.buffer, (VkDeviceSize[]){0});
+ vkCmdBindIndexBuffer(buffer, meshes[i].index.buffer, 0, VK_INDEX_TYPE_UINT32);
+ // Push constants must be recorded before the draw that consumes them.
+ vkCmdPushConstants(buffer, pipeline->layout,
+ VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(struct vec3), &meshes[i].position);
+ vkCmdDrawIndexed(buffer, meshes[i].index_count, 1, 0, 0, 0);
+ }
+}
+
+void vlkn_present(struct vlkn_renderer *renderer) {
+ size_t frame = 0;
+ VkCommandBuffer buffer = renderer->command.buffers[frame];
+
+ vkCmdEndRenderPass(buffer);
+ vlkn_check(vkEndCommandBuffer(buffer));
+
+ VkSubmitInfo submit = {
+ .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ .waitSemaphoreCount = 1,
+ .pWaitSemaphores = &renderer->locks.image_available[frame],
+ .pWaitDstStageMask = (VkPipelineStageFlags[]) { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT },
+ .signalSemaphoreCount = 1,
+ .pSignalSemaphores = &renderer->locks.render_finished[frame],
+ .commandBufferCount = 1,
+ .pCommandBuffers = &renderer->command.buffers[frame]
+ };
+
+ if (!vlkn_check(vkQueueSubmit(renderer->gpu.gfx_queue, 1, &submit, renderer->locks.in_flight[frame])))
+ return;
+
+ VkPresentInfoKHR present = {
+ .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ .waitSemaphoreCount = 1,
+ .pWaitSemaphores = &renderer->locks.render_finished[frame],
+ .swapchainCount = 1,
+ .pSwapchains = &renderer->swapchain.swapchain,
+ .pImageIndices = &idx
+ };
+
+ switch (vkQueuePresentKHR(renderer->gpu.present_queue, &present)) {
+ case VK_SUCCESS:
+ break;
+ case VK_SUBOPTIMAL_KHR:
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ resize_callback(renderer);
+ break;
+ default:
+ abort();
+ return;
+ }
+
+ wl_display_roundtrip(renderer->win->dpy);
+ //ren->current_frame = (ren->current_frame + 1) % MAX_FRAMES;
+}
diff --git a/src/renderer.h b/src/renderer.h
new file mode 100644
index 0000000..fcaa5af
--- /dev/null
+++ b/src/renderer.h
@@ -0,0 +1,97 @@
+#ifndef _RENDERER_H_
+#define _RENDERER_H_
+
+#include <vlkn.h>
+#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_wayland.h>
+
+#include "buffer.h"
+#include "utils.h"
+
+#define MAX_FRAMES 3
+
+struct vlkn_renderer {
+ struct vlkn_window *win;
+ VkInstance instance;
+ VkSurfaceKHR surface;
+
+ struct {
+ uint32_t cap, len;
+ struct phy_gpu {
+ VkPhysicalDevice gpu;
+ size_t graphics_queue;
+ size_t present_queue;
+ struct surface_caps {
+ VkSurfaceCapabilitiesKHR caps;
+ struct {
+ uint32_t len;
+ VkSurfaceFormatKHR *data;
+ } formats;
+ struct {
+ uint32_t len;
+ VkPresentModeKHR *data;
+ } present_modes;
+ } surface_caps;
+ } *gpus;
+ struct phy_gpu *chosen;
+ } phy_gpus;
+
+ struct {
+ VkDevice device;
+ VkQueue gfx_queue;
+ VkQueue present_queue;
+ } gpu;
+
+ struct {
+ VkSwapchainKHR swapchain;
+ VkFormat format;
+ VkExtent2D extent;
+ struct {
+ uint32_t len;
+ struct {
+ VkImage image;
+ VkImageView view;
+ VkFramebuffer framebuffer;
+ } *data;
+ } images;
+ } swapchain;
+
+ struct {
+ VkCommandPool pool;
+ VkCommandBuffer buffers[MAX_FRAMES];
+ } command;
+
+ struct {
+ VkPipelineLayout layout;
+ VkPipeline gfx;
+ VkPipeline transp;
+ VkPipeline blend;
+ } pipeline;
+
+ struct {
+ VkSemaphore image_available[MAX_FRAMES];
+ VkSemaphore render_finished[MAX_FRAMES];
+ VkFence in_flight[MAX_FRAMES];
+ } locks;
+
+ struct vlkn_images depth;
+/*
+ struct {
+ VkDescriptorSetLayout layout;
+ VkDescriptorPool pool;
+ struct descriptor_allocator allocator;
+ VkDescriptorSet sets[MAX_FRAMES];
+ } descriptor;
+
+ struct {
+ struct buffer buffer;
+ void *data;
+ } uniform[MAX_FRAMES];
+*/
+
+ VkRenderPass render_pass;
+
+ uint32_t current_frame;
+};
+
+#endif
diff --git a/src/utils.c b/src/utils.c
new file mode 100644
index 0000000..5b3b87d
--- /dev/null
+++ b/src/utils.c
@@ -0,0 +1,38 @@
+#include "utils.h"
+#include "renderer.h"
+
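+// Allocate and begin a one-shot primary command buffer from the renderer's
+// command pool; pair every call with end_single_command.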
+VkCommandBuffer begin_single_command(struct vlkn_renderer *ren) {
+ VkCommandBufferAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ .commandPool = ren->command.pool,
+ .commandBufferCount = 1
+ };
+
+ VkCommandBuffer buffer;
+ vkAllocateCommandBuffers(ren->gpu.device, &alloc_info, &buffer);
+
+ VkCommandBufferBeginInfo begin_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT
+ };
+
+ vkBeginCommandBuffer(buffer, &begin_info);
+
+ return buffer;
+}
+
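+// End, submit, and synchronously wait for a one-shot command buffer.
+// vkQueueWaitIdle keeps this simple but serializes the GPU, so it is only
+// suitable for load-time transfers, not per-frame work.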
+void end_single_command(struct vlkn_renderer *ren, VkCommandBuffer buffer) {
+ vkEndCommandBuffer(buffer);
+
+ VkSubmitInfo submit_info = {
+ .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ .commandBufferCount = 1,
+ .pCommandBuffers = &buffer
+ };
+
+ vkQueueSubmit(ren->gpu.gfx_queue, 1, &submit_info, VK_NULL_HANDLE);
+ vkQueueWaitIdle(ren->gpu.gfx_queue);
+
+ vkFreeCommandBuffers(ren->gpu.device, ren->command.pool, 1, &buffer);
+}
diff --git a/src/utils.h b/src/utils.h
new file mode 100644
index 0000000..ee0b6c6
--- /dev/null
+++ b/src/utils.h
@@ -0,0 +1,71 @@
+#ifndef _UTILS_H_
+#define _UTILS_H_
+
+#include <stdlib.h> /* calloc, realloc, abort (array helpers) */
+#include <assert.h> /* assert (ensure_ptr) */
+
+#define len(X) (sizeof(X) / sizeof(*(X)))
+
+// Yes, I know: these still evaluate their arguments twice.
+#define min(X, Y) ((X) < (Y) ? (X) : (Y))
+#define max(X, Y) ((X) > (Y) ? (X) : (Y))
+
+#define array(type) { size_t len, cap; type *data; }
+#define array_init(array) do {\
+ (array).len = 0; \
+ (array).cap = 5; \
+ (array).data = calloc(5, sizeof(*(array).data)); \
+ } while (0)
+#define array_new(type) { \
+ .len = 0, \
+ .cap = 5, \
+ .data = calloc(5, sizeof(type)) \
+ }
+
+#define foreach(item, array) \
+ for (typeof((array).data) item = (array).data; item < (array).data + (array).len; item++)
+
+#define array_append(array, item) do { \
+ ensure_ptr((array).data, (array).len + 1, (array).cap); \
+ (array).data[(array).len++] = item; \
+ } while(0)
+
+#define ensure(var, ptr_mem, len_mem, cap_mem) ensure_ptr((var).ptr_mem, (var).len_mem, (var).cap_mem)
+#define ensure_ptr(ptr, len, cap) do { \
+ assert(ptr); \
+ if ((len) > (cap)) { \
+ (ptr) = realloc((ptr), ((cap) = (cap) * 2 > (len) ? (cap) * 2 : (len)) * sizeof(*(ptr))); \
+ if (!(ptr)) \
+ abort(); \
+ } \
+ } while (0)
+
+#define STRINGFY(X) #X
+#define STRGY(X) STRINGFY(X)
+
+
+#ifndef NDEBUG
+# include <stdio.h>
+# define dbglog(F) fprintf(stderr, __FILE__ ":%s:" STRGY(__LINE__) ": " F "\n", __func__)
+# define dbglogf(F, ...) fprintf(stderr, __FILE__ ":%s:" STRGY(__LINE__) ": " F "\n", __func__, __VA_ARGS__)
+#else
+# define dbglog(F) do { /* noop */ } while(0)
+# define dbglogf(F, ...) do { /* noop */ } while(0)
+#endif
+
+#include <vulkan/vk_enum_string_helper.h>
+#include <stdbool.h>
+
+static inline bool vlkn_check_expr(VkResult exp, const char *exp_str) {
+ if (exp == VK_SUCCESS)
+ return true;
+ dbglogf("%s failed with %s", exp_str, string_VkResult(exp));
+ return false;
+}
+
+#define vlkn_check(exp) vlkn_check_expr(exp, #exp)
+
+#include <vulkan/vulkan.h>
+struct vlkn_renderer;
+
+VkCommandBuffer begin_single_command(struct vlkn_renderer *ren);
+void end_single_command(struct vlkn_renderer *ren, VkCommandBuffer buffer);
+
+#endif
diff --git a/src/window.c b/src/window.c
new file mode 100644
index 0000000..0d5bac8
--- /dev/null
+++ b/src/window.c
@@ -0,0 +1,177 @@
+#include <vlkn.h>
+#include "window.h"
+#include "utils.h"
+#include <vulkan/vulkan_wayland.h>
+#include <stdlib.h>
+#include <string.h>
+
+static void global(void *data, struct wl_registry *reg,
+ uint32_t name, const char *interface, uint32_t version) {
+ struct vlkn_window *win = data;
+ if (strcmp(wl_compositor_interface.name, interface) == 0) {
+ win->comp = wl_registry_bind(reg, name, &wl_compositor_interface, version);
+ } else if (strcmp(xdg_wm_base_interface.name, interface) == 0) {
+ win->xdg_base = wl_registry_bind(reg, name, &xdg_wm_base_interface, version);
+ } else if (strcmp(zxdg_decoration_manager_v1_interface.name, interface) == 0) {
+ win->deco_manager = wl_registry_bind(reg, name,
+ &zxdg_decoration_manager_v1_interface, version);
+ }
+}
+
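+// None of the globals we bind are expected to disappear mid-session;
+// treat removal as fatal rather than leaving a dangling proxy.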
+static void global_remove(void *data, struct wl_registry *reg, uint32_t name) {
+ (void) data; (void) reg; (void) name;
+ abort();
+}
+
+static struct wl_registry_listener reg_list = {
+ .global = global,
+ .global_remove = global_remove
+};
+
+static void ping(void *data, struct xdg_wm_base *base, uint32_t serial) {
+ (void) data;
+ xdg_wm_base_pong(base, serial);
+}
+
+static struct xdg_wm_base_listener xdg_list = {
+ .ping = ping
+};
+
+void xdg_surface_configure(void *data, struct xdg_surface *xdg_surface, uint32_t serial) {
+ (void) data;
+ xdg_surface_ack_configure(xdg_surface, serial);
+}
+
+static struct xdg_surface_listener xdg_surface_list = {
+ .configure = xdg_surface_configure,
+};
+
+void toplevel_configure(void *data, struct xdg_toplevel *xdg_toplevel,
+ int32_t width, int32_t height, struct wl_array *states) {
+ (void) xdg_toplevel; (void) states;
+ struct vlkn_window *win = data;
+ win->resize = true;
+ win->width = width != 0 ? width : 1920;
+ win->height = height != 0 ? height : 1080;
+ if (win->on_resize)
+ win->on_resize(win->data);
+
+ dbglogf("%dx%d\n", win->width, win->height);
+}
+
+void toplevel_close(void *data, struct xdg_toplevel *toplevel) {
+ (void) toplevel;
+ struct vlkn_window *win = data;
+ win->should_close = true;
+}
+
+void configure_bounds(void *data, struct xdg_toplevel *xdg_toplevel, int32_t width, int32_t height) {
+ (void) data; (void) xdg_toplevel; (void) width; (void) height;
+}
+
+void wm_capabilities(void *data, struct xdg_toplevel *xdg_toplevel, struct wl_array *capabilities) {
+ (void) data; (void) xdg_toplevel; (void) capabilities;
+}
+
+static struct xdg_toplevel_listener toplevel_listener = {
+ .configure = toplevel_configure,
+ .close = toplevel_close,
+ .configure_bounds = configure_bounds,
+ .wm_capabilities = wm_capabilities
+};
+
+struct vlkn_window *vlkn_window_init(const char *title) {
+ struct vlkn_window *win = calloc(1, sizeof(*win));
+ if (!win)
+ return NULL;
+
+ win->title = title;
+ win->should_close = false;
+
+ win->dpy = wl_display_connect(NULL);
+ if (!win->dpy)
+ goto err;
+
+ win->reg = wl_display_get_registry(win->dpy);
+ if (!win->reg)
+ goto err;
+
+ wl_registry_add_listener(win->reg, &reg_list, win);
+ wl_display_roundtrip(win->dpy);
+
+ if (!win->comp || !win->dpy || !win->deco_manager || !win->xdg_base)
+ goto err;
+
+ xdg_wm_base_add_listener(win->xdg_base, &xdg_list, win);
+
+ win->surface = wl_compositor_create_surface(win->comp);
+ if (!win->surface)
+ goto err;
+
+ win->xdg_surface = xdg_wm_base_get_xdg_surface(win->xdg_base, win->surface);
+ xdg_surface_add_listener(win->xdg_surface, &xdg_surface_list, win);
+
+ win->xdg_toplevel = xdg_surface_get_toplevel(win->xdg_surface);
+ xdg_toplevel_set_title(win->xdg_toplevel, title);
+ xdg_toplevel_set_min_size(win->xdg_toplevel, 640, 480);
+ xdg_toplevel_add_listener(win->xdg_toplevel, &toplevel_listener, win);
+
+ win->deco = zxdg_decoration_manager_v1_get_toplevel_decoration(win->deco_manager, win->xdg_toplevel);
+ zxdg_toplevel_decoration_v1_set_mode(win->deco, ZXDG_TOPLEVEL_DECORATION_V1_MODE_SERVER_SIDE);
+
+ wl_surface_commit(win->surface);
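+ // Wait for the first configure so width/height are valid before the
+ // caller creates a swapchain against this surface.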
+ while (!win->resize)
+ wl_display_roundtrip(win->dpy);
+ win->resize = false;
+
+ return win;
+
+err:
+ vlkn_window_destroy(win);
+ return NULL;
+}
+
+void window_on_resize(struct vlkn_window *win, void (*callback)(void *data), void *data) {
+ win->on_resize = callback;
+ win->data = data;
+}
+
+void vlkn_window_destroy(struct vlkn_window *win) {
+ if (!win)
+ return;
+
+ if (win->deco)
+ zxdg_toplevel_decoration_v1_destroy(win->deco);
+ if (win->deco_manager)
+ zxdg_decoration_manager_v1_destroy(win->deco_manager);
+ if (win->xdg_toplevel)
+ xdg_toplevel_destroy(win->xdg_toplevel);
+ if (win->xdg_surface)
+ xdg_surface_destroy(win->xdg_surface);
+ if (win->xdg_base)
+ xdg_wm_base_destroy(win->xdg_base);
+ if (win->comp)
+ wl_compositor_destroy(win->comp);
+ if (win->reg)
+ wl_registry_destroy(win->reg);
+ if (win->dpy)
+ wl_display_disconnect(win->dpy);
+ free(win);
+}
+
+VkSurfaceKHR window_create_vk_surface(VkInstance instance, struct vlkn_window *win) {
+ if (!win)
+ return VK_NULL_HANDLE;
+
+ VkWaylandSurfaceCreateInfoKHR create_info = {
+ .sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR,
+ .display = win->dpy,
+ .surface = win->surface
+ };
+
+ VkSurfaceKHR surface;
+ if (!vlkn_check(vkCreateWaylandSurfaceKHR(instance, &create_info, NULL, &surface)))
+ return VK_NULL_HANDLE;
+
+ return surface;
+}
diff --git a/src/window.h b/src/window.h
new file mode 100644
index 0000000..36e32a2
--- /dev/null
+++ b/src/window.h
@@ -0,0 +1,35 @@
+#ifndef _WINDOW_H_
+#define _WINDOW_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <vulkan/vulkan.h>
+
+#include <wayland-client.h>
+#include "xdg-shell-protocol.h"
+#include "xdg-decoration-unstable-v1-protocol.h"
+
+struct vlkn_window {
+ const char *title;
+
+ struct wl_display *dpy;
+ struct wl_registry *reg;
+ struct wl_compositor *comp;
+ struct zxdg_decoration_manager_v1 *deco_manager;
+ struct zxdg_toplevel_decoration_v1 *deco;
+ struct wl_surface *surface;
+ struct xdg_wm_base *xdg_base;
+ struct xdg_surface *xdg_surface;
+ struct xdg_toplevel *xdg_toplevel;
+
+ int32_t width, height;
+ bool resize, should_close;
+
+ void *data;
+ void (*on_resize)(void *data);
+};
+
+void window_on_resize(struct vlkn_window *win, void (*callback)(void *data), void *data);
+VkSurfaceKHR window_create_vk_surface(VkInstance instance, struct vlkn_window *win);
+
+#endif