author     Anna (navi) Figueiredo Gomes <navi@vlhl.dev>  2024-06-30 22:23:08 +0200
committer  Anna (navi) Figueiredo Gomes <navi@vlhl.dev>  2024-06-30 22:29:30 +0200
commit     87c8428df0bf1b2f6932bb16390f69be57e21f6b (patch)
tree       0cc0f7cab79a0373ef366898df8f5773391bae17 /src/pipeline.c
libvlkn: initial commit, basic prototype api in place (HEAD, main)
Signed-off-by: Anna (navi) Figueiredo Gomes <navi@vlhl.dev>
Diffstat (limited to 'src/pipeline.c')
-rw-r--r--  src/pipeline.c  347
1 file changed, 347 insertions, 0 deletions
diff --git a/src/pipeline.c b/src/pipeline.c
new file mode 100644
index 0000000..f06ba7b
--- /dev/null
+++ b/src/pipeline.c
@@ -0,0 +1,347 @@
+#include <vlkn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "pipeline.h"
+#include "utils.h"
+#include "renderer.h"
+
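+// growable descriptor set allocator: pools are created on demand from the
+// per-type ratios; reusable pools sit in `ready`, exhausted ones move to `full`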
+struct descriptor_allocator descriptor_init(uint32_t sets, size_t ratio_count, struct pool_ratio ratios[ratio_count]) {
+ struct descriptor_allocator alloc = {
+ .set_count = sets,
+ .ratio_count = ratio_count,
+ .ratios = calloc(ratio_count, sizeof(*ratios))
+ };
+ if (!alloc.ratios)
+ abort();
+ memcpy(alloc.ratios, ratios, sizeof(*ratios) * ratio_count);
+ array_init(alloc.ready);
+ array_init(alloc.full);
+ return alloc;
+}
+
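+// create a pool sized by multiplying each type's ratio by the current set budget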
+static VkDescriptorPool create_pool(VkDevice dev, struct descriptor_allocator *allocator) {
+ VkDescriptorPoolSize pool_sizes[allocator->ratio_count];
+ for (size_t i = 0; i < allocator->ratio_count; i++) {
+ pool_sizes[i] = (VkDescriptorPoolSize) {
+ .type = allocator->ratios[i].type,
+ .descriptorCount = allocator->ratios[i].ratio * allocator->set_count
+ };
+ }
+
+ VkDescriptorPoolCreateInfo pool_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+ .maxSets = allocator->set_count,
+ .poolSizeCount = allocator->ratio_count,
+ .pPoolSizes = pool_sizes
+ };
+
+ VkDescriptorPool new_pool;
+ // FIXME: verify result
+ vkCreateDescriptorPool(dev, &pool_info, NULL, &new_pool);
+ return new_pool;
+}
+
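+// pop a ready pool if one exists; otherwise create a new one and grow the
+// per-pool set budget for the next allocation, capped at 4092 sets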
+static VkDescriptorPool get_pool(VkDevice dev, struct descriptor_allocator *allocator) {
+ if (allocator->ready.len > 0) {
+ assert(allocator->ready.data);
+ return allocator->ready.data[--allocator->ready.len];
+ }
+
+ VkDescriptorPool pool = create_pool(dev, allocator);
+ allocator->set_count *= 1.5;
+ if (allocator->set_count > 4092)
+ allocator->set_count = 4092;
+
+ return pool;
+}
+
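+// allocate a single set; if the current pool is exhausted or fragmented,
+// retire it to `full` and retry once from a fresh pool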
+VkDescriptorSet descriptor_allocate(struct descriptor_allocator *alloc, VkDevice dev, VkDescriptorSetLayout layout) {
+ VkDescriptorPool pool = get_pool(dev, alloc);
+
+ VkDescriptorSetAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ .descriptorPool = pool,
+ .descriptorSetCount = 1,
+ .pSetLayouts = &layout
+ };
+
+ VkDescriptorSet set;
+ switch (vkAllocateDescriptorSets(dev, &alloc_info, &set)) {
+ case VK_ERROR_OUT_OF_POOL_MEMORY:
+ case VK_ERROR_FRAGMENTED_POOL:
+ array_append(alloc->full, pool);
+ pool = get_pool(dev, alloc);
+ alloc_info.descriptorPool = pool;
+ // FIXME: check properly
+ if (vkAllocateDescriptorSets(dev, &alloc_info, &set) != VK_SUCCESS)
+ abort();
+ break;
+ case VK_SUCCESS:
+ break;
+ default:
+ abort();
+ }
+
+ array_append(alloc->ready, pool);
+ return set;
+}
+
+void descriptor_destroy(struct descriptor_allocator *alloc, VkDevice dev) {
+ for (size_t i = 0; i < alloc->full.len; i++)
+ vkDestroyDescriptorPool(dev, alloc->full.data[i], NULL);
+ for (size_t i = 0; i < alloc->ready.len; i++)
+ vkDestroyDescriptorPool(dev, alloc->ready.data[i], NULL);
+ alloc->full.len = alloc->ready.len = 0;
+}
+
+// ---
+
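+// staged descriptor writes: the buffer/image info lives inside the returned
+// writer and is wired into the VkWriteDescriptorSet by descriptor_update()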
+struct descriptor_writer descriptor_write_buffer(size_t binding,
+ VkBuffer buffer, size_t size, size_t offset, VkDescriptorType type) {
+ struct descriptor_writer writer = {
+ .type = DESCRIPTOR_WRITER_BUFFER,
+ .buffer_info = {
+ .buffer = buffer,
+ .offset = offset,
+ .range = size
+ },
+ .write_info = {
+ .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ .dstBinding = binding,
+ .descriptorCount = 1,
+ .descriptorType = type,
+ }
+ };
+ return writer;
+}
+
+struct descriptor_writer descriptor_write_image(size_t binding,
+ VkImageView view, VkSampler sampler, VkImageLayout layout, VkDescriptorType type) {
+ struct descriptor_writer writer = {
+ .type = DESCRIPTOR_WRITER_IMAGE,
+ .image_info = {
+ .sampler = sampler,
+ .imageView = view,
+ .imageLayout = layout
+ },
+ .write_info = {
+ .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ .dstBinding = binding,
+ .descriptorCount = 1,
+ .descriptorType = type,
+ }
+ };
+ return writer;
+}
+
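+// resolve the staged writes against a concrete set and flush them in a
+// single vkUpdateDescriptorSets call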
+// TODO: benchmark this against a vkguide-like solution
+void descriptor_update(VkDevice dev, VkDescriptorSet set,
+ size_t write_count, struct descriptor_writer write_sets[write_count]) {
+ VkWriteDescriptorSet writers[write_count];
+ for (size_t i = 0; i < write_count; i++) {
+ writers[i] = write_sets[i].write_info;
+ writers[i].dstSet = set;
+ switch(write_sets[i].type) {
+ case DESCRIPTOR_WRITER_BUFFER:
+ writers[i].pBufferInfo = &write_sets[i].buffer_info;
+ break;
+ case DESCRIPTOR_WRITER_IMAGE:
+ writers[i].pImageInfo = &write_sets[i].image_info;
+ break;
+ }
+ }
+ vkUpdateDescriptorSets(dev, write_count, writers, 0, NULL);
+}
+
+struct vlkn_pipeline *vlkn_pipeline_init(struct vlkn_renderer *renderer, struct vlkn_pipeline_opts *opts) {
+ struct vlkn_pipeline *pipeline = calloc(1, sizeof(*pipeline));
+ if (!pipeline)
+ return NULL;
+
+ struct pool_ratio ratios[opts->descriptors_len];
+
+ VkPipelineShaderStageCreateInfo shaders[opts->shader_len];
+ for (size_t i = 0; i < opts->shader_len; i++) {
+ shaders[i] = (VkPipelineShaderStageCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .module = opts->shaders[i].module,
+ .stage = opts->shaders[i].stages,
+ .pName = "main"
+ };
+ }
+
+ VkPipelineVertexInputStateCreateInfo vertex_input = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ .vertexBindingDescriptionCount = 1,
+ .pVertexBindingDescriptions = &opts->attrs_desc,
+ .vertexAttributeDescriptionCount = opts->attrs_len,
+ .pVertexAttributeDescriptions = opts->attrs
+ };
+
+ VkPipelineInputAssemblyStateCreateInfo input_assembly = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ .primitiveRestartEnable = VK_FALSE
+ };
+
+ VkDynamicState dyn_states[] = {
+ VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR
+ };
+
+ VkPipelineDynamicStateCreateInfo dynamic_state_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ .dynamicStateCount = 2,
+ .pDynamicStates = dyn_states
+ };
+
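+ // viewport and scissor are dynamic state, so only their counts are declared here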
+ VkPipelineViewportStateCreateInfo viewport_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ .viewportCount = 1,
+ .scissorCount = 1
+ };
+
+ VkPipelineRasterizationStateCreateInfo rasterizer = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+ .depthClampEnable = VK_FALSE,
+ .rasterizerDiscardEnable = VK_FALSE,
+ .polygonMode = VK_POLYGON_MODE_FILL,
+ .lineWidth = 1.0f,
+ .cullMode = VK_CULL_MODE_BACK_BIT,
+ .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+ .depthBiasEnable = VK_FALSE,
+ };
+
+ VkPipelineMultisampleStateCreateInfo multisampling = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ .sampleShadingEnable = VK_FALSE,
+ .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT,
+ .minSampleShading = 1.0f,
+ };
+
+ VkPipelineColorBlendAttachmentState color_state = {
+ .colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
+ .blendEnable = VK_FALSE
+ };
+
+ VkPipelineColorBlendStateCreateInfo color_blending = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ .logicOpEnable = VK_FALSE,
+ .attachmentCount = 1,
+ .pAttachments = &color_state
+ };
+
+ VkPipelineDepthStencilStateCreateInfo depth_stencil = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+ .depthTestEnable = VK_TRUE,
+ .depthWriteEnable = VK_TRUE,
+ .depthCompareOp = VK_COMPARE_OP_LESS,
+ .depthBoundsTestEnable = VK_FALSE,
+ .minDepthBounds = 0.0f,
+ .maxDepthBounds = 1.0,
+ .stencilTestEnable = VK_FALSE,
+ };
+
+ VkDescriptorSetLayoutCreateInfo desc_create_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ .bindingCount = opts->descriptors_len,
+ .pBindings = opts->descriptors
+ };
+
+ if (!vlkn_check(vkCreateDescriptorSetLayout(renderer->gpu.device, &desc_create_info, NULL, &pipeline->descriptor_layout)))
+ goto err;
+
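+ // one descriptor of each binding's type per set; the allocator grows its pools from this baseline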
+ for (size_t i = 0; i < opts->descriptors_len; i++) {
+ ratios[i].type = opts->descriptors[i].descriptorType;
+ ratios[i].ratio = 1;
+ }
+
+ pipeline->allocator = descriptor_init(1, opts->descriptors_len, ratios);
+
+ VkPipelineLayoutCreateInfo pipeline_layout_create_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ .setLayoutCount = 1,
+ .pSetLayouts = &pipeline->descriptor_layout,
+ .pushConstantRangeCount = opts->push_contant_len,
+ .pPushConstantRanges = opts->push_constants
+ };
+
+ if (!vlkn_check(vkCreatePipelineLayout(renderer->gpu.device, &pipeline_layout_create_info, NULL, &pipeline->layout)))
+ goto err;
+
+ VkGraphicsPipelineCreateInfo pipeline_info = {
+ .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ .stageCount = opts->shader_len,
+ .pStages = shaders,
+ .pVertexInputState = &vertex_input,
+ .pInputAssemblyState = &input_assembly,
+ .pViewportState = &viewport_state,
+ .pRasterizationState = &rasterizer,
+ .pMultisampleState = &multisampling,
+ .pColorBlendState = &color_blending,
+ .pDynamicState = &dynamic_state_info,
+ .pDepthStencilState = &depth_stencil,
+ .layout = pipeline->layout,
+ .renderPass = renderer->render_pass,
+ .subpass = 0,
+ .basePipelineHandle = VK_NULL_HANDLE,
+ .basePipelineIndex = -1
+ };
+
+ if (!vlkn_check(vkCreateGraphicsPipelines(renderer->gpu.device, VK_NULL_HANDLE, 1, &pipeline_info, NULL, &pipeline->pipeline)))
+ goto err;
+
+ goto out;
+
+err:
+ vkDestroyDescriptorSetLayout(renderer->gpu.device, pipeline->descriptor_layout, NULL);
+ vkDestroyPipelineLayout(renderer->gpu.device, pipeline->layout, NULL);
+ pipeline = (free(pipeline), NULL);
+out:
+ return pipeline;
+}
+
+bool vlkn_shader_load(struct vlkn_renderer *renderer,
+ const char *path, VkShaderStageFlagBits stage, struct vlkn_shader *shader) {
+ if (!path) {
+ dbglog("attempted to load shader with a null path");
+ return false;
+ }
+
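+ // read the whole SPIR-V blob; codeSize is in bytes and pCode must be
+ // 4-byte aligned, which calloc's allocation guarantees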
+ FILE *fp = fopen(path, "rb");
+ if (!fp) {
+ dbglogf("failed to open shader %s.", path);
+ return false;
+ }
+
+ fseek(fp, 0, SEEK_END);
+ size_t len = ftell(fp);
+ rewind(fp);
+
+ uint8_t *bytes = calloc(len, sizeof(*bytes));
+ if (!bytes || fread(bytes, sizeof(*bytes), len, fp) != len) {
+ dbglogf("failed to read shader %s.", path);
+ free(bytes);
+ fclose(fp);
+ return false;
+ }
+ fclose(fp);
+
+ VkShaderModuleCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+ .codeSize = len,
+ .pCode = (uint32_t *)bytes
+ };
+
+ bool ret = true;
+ if (!vlkn_check(vkCreateShaderModule(renderer->gpu.device, &create_info, NULL, &shader->module))) {
+ ret = false;
+ }
+
+ shader->stages = stage;
+
+ free(bytes);
+ return ret;
+}
+
+void vlkn_shader_unload(struct vlkn_renderer *renderer, struct vlkn_shader *shader) {
+ vkDestroyShaderModule(renderer->gpu.device, shader->module, NULL);
+}