author		Simon Ser <contact@emersion.fr>	2022-11-25 12:04:28 +0100
committer	Simon Ser <contact@emersion.fr>	2022-11-28 23:50:41 +0000
commit		10f543d5792ec0a74688c4da3781f6f84fb2ed9d (patch)
tree		3f4982e8c9b04bd8636df1a1f715b5721a67ebd0
parent		2a414c896ec7a4e492d81bc758248c920b47e8d1 (diff)
render/vulkan: release stage buffers after command buffer completes
We need to wait for the pending command buffer to complete before
re-using stage buffers. Otherwise we'll overwrite the stage buffer
with new contents before the texture is fully uploaded.
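The patch follows a common pattern for recycling GPU resources: staging buffers used by a submitted command buffer are moved onto that command buffer's own list, and only return to the renderer's free list once the GPU has signaled the command buffer's timeline point. Below is a minimal sketch of that ownership transfer under simplified assumptions — plain singly linked lists instead of wl_list, no real Vulkan handles, and all names illustrative rather than actual wlroots API:

/* Sketch of the stage-buffer ownership transfer (hypothetical types). */
#include <stddef.h>
#include <stdint.h>

struct stage_buffer {
	struct stage_buffer *next;
	size_t allocs_size; /* bytes suballocated from this buffer this frame */
};

struct command_buffer {
	uint64_t timeline_point;    /* timeline value signaled on completion */
	struct stage_buffer *stage; /* buffers the GPU may still be reading */
};

struct renderer {
	struct stage_buffer *free_stage; /* buffers safe to suballocate from */
};

/* At submission: every stage buffer used this frame moves from the
 * renderer's free list to the in-flight command buffer, so it cannot
 * be handed out again while the GPU may still be copying from it. */
static void submit_transfer(struct renderer *r, struct command_buffer *cb) {
	struct stage_buffer **p = &r->free_stage;
	while (*p != NULL) {
		struct stage_buffer *buf = *p;
		if (buf->allocs_size == 0) {
			p = &buf->next; /* unused this frame: keep it available */
			continue;
		}
		*p = buf->next; /* unlink from the free list */
		buf->next = cb->stage;
		cb->stage = buf; /* now owned by the in-flight command buffer */
	}
}

/* Once the timeline semaphore reaches cb->timeline_point, the uploads
 * are complete and the buffers can be recycled. */
static void release_completed(struct renderer *r, struct command_buffer *cb,
		uint64_t current_point) {
	if (current_point < cb->timeline_point) {
		return; /* still in flight: reusing now would corrupt uploads */
	}
	while (cb->stage != NULL) {
		struct stage_buffer *buf = cb->stage;
		cb->stage = buf->next;
		buf->allocs_size = 0; /* all suballocations are free again */
		buf->next = r->free_stage;
		r->free_stage = buf; /* back on the renderer's free list */
	}
}

Deferring the reset of allocs_size until completion, instead of zeroing it right after waiting as the old code did, is what prevents the renderer from handing out a staging span the GPU is still reading.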
-rw-r--r--	include/render/vulkan.h		4
-rw-r--r--	render/vulkan/renderer.c	34
2 files changed, 25 insertions, 13 deletions
diff --git a/include/render/vulkan.h b/include/render/vulkan.h
index 5b47e8ef..d21a7ca0 100644
--- a/include/render/vulkan.h
+++ b/include/render/vulkan.h
@@ -150,6 +150,8 @@ struct wlr_vk_command_buffer {
 	uint64_t timeline_point;
 	// Textures to destroy after the command buffer completes
 	struct wl_list destroy_textures; // wlr_vk_texture.destroy_link
+	// Staging shared buffers to release after the command buffer completes
+	struct wl_list stage_buffers; // wlr_vk_shared_buffer.link
 };
 
 #define VULKAN_COMMAND_BUFFERS_CAP 64
@@ -292,7 +294,7 @@ struct wlr_vk_allocation {
 // List of suballocated staging buffers.
 // Used to upload to/read from device local images.
 struct wlr_vk_shared_buffer {
-	struct wl_list link; // wlr_vk_renderer.stage.buffers
+	struct wl_list link; // wlr_vk_renderer.stage.buffers or wlr_vk_command_buffer.stage_buffers
 	VkBuffer buffer;
 	VkDeviceMemory memory;
 	VkDeviceSize buf_size;
diff --git a/render/vulkan/renderer.c b/render/vulkan/renderer.c
index f09da5e4..91667405 100644
--- a/render/vulkan/renderer.c
+++ b/render/vulkan/renderer.c
@@ -192,13 +192,6 @@ static void shared_buffer_destroy(struct wlr_vk_renderer *r,
 	free(buffer);
 }
 
-static void release_stage_allocations(struct wlr_vk_renderer *renderer) {
-	struct wlr_vk_shared_buffer *buf;
-	wl_list_for_each(buf, &renderer->stage.buffers, link) {
-		buf->allocs.size = 0;
-	}
-}
-
 struct wlr_vk_buffer_span vulkan_get_stage_span(struct wlr_vk_renderer *r,
 		VkDeviceSize size, VkDeviceSize alignment) {
 	// try to find free span
@@ -426,6 +419,7 @@ static bool init_command_buffer(struct wlr_vk_command_buffer *cb,
 		.vk = vk_cb,
 	};
 	wl_list_init(&cb->destroy_textures);
+	wl_list_init(&cb->stage_buffers);
 	return true;
 }
 
@@ -450,13 +444,22 @@ static bool wait_command_buffer(struct wlr_vk_command_buffer *cb,
 	return true;
 }
 
-static void release_command_buffer_resources(struct wlr_vk_command_buffer *cb) {
+static void release_command_buffer_resources(struct wlr_vk_command_buffer *cb,
+		struct wlr_vk_renderer *renderer) {
 	struct wlr_vk_texture *texture, *texture_tmp;
 	wl_list_for_each_safe(texture, texture_tmp, &cb->destroy_textures, destroy_link) {
 		wl_list_remove(&texture->destroy_link);
 		texture->last_used_cb = NULL;
 		wlr_texture_destroy(&texture->wlr_texture);
 	}
+
+	struct wlr_vk_shared_buffer *buf, *buf_tmp;
+	wl_list_for_each_safe(buf, buf_tmp, &cb->stage_buffers, link) {
+		buf->allocs.size = 0;
+
+		wl_list_remove(&buf->link);
+		wl_list_insert(&renderer->stage.buffers, &buf->link);
+	}
 }
 
 static struct wlr_vk_command_buffer *get_command_buffer(
@@ -476,7 +479,7 @@ static struct wlr_vk_command_buffer *get_command_buffer(
 		struct wlr_vk_command_buffer *cb = &renderer->command_buffers[i];
 		if (cb->vk != VK_NULL_HANDLE && !cb->recording &&
 				cb->timeline_point <= current_point) {
-			release_command_buffer_resources(cb);
+			release_command_buffer_resources(cb, renderer);
 		}
 	}
 
@@ -955,14 +958,21 @@ static void vulkan_end(struct wlr_renderer *wlr_renderer) {
 		return;
 	}
 
+	struct wlr_vk_shared_buffer *stage_buf, *stage_buf_tmp;
+	wl_list_for_each_safe(stage_buf, stage_buf_tmp, &renderer->stage.buffers, link) {
+		if (stage_buf->allocs.size == 0) {
+			continue;
+		}
+		wl_list_remove(&stage_buf->link);
+		wl_list_insert(&stage_cb->stage_buffers, &stage_buf->link);
+	}
+
 	// sadly this is required due to the current api/rendering model of wlr
 	// ideally we could use gpu and cpu in parallel (_without_ the
 	// implicit synchronization overhead and mess of opengl drivers)
 	if (!wait_command_buffer(render_cb, renderer)) {
 		return;
 	}
-
-	release_stage_allocations(renderer);
 }
 
 static bool vulkan_render_subtexture_with_matrix(struct wlr_renderer *wlr_renderer,
@@ -1162,7 +1172,7 @@ static void vulkan_destroy(struct wlr_renderer *wlr_renderer) {
 		if (cb->vk == VK_NULL_HANDLE) {
 			continue;
 		}
-		release_command_buffer_resources(cb);
+		release_command_buffer_resources(cb, renderer);
 	}
 
 	// stage.cb automatically freed with command pool
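For context, the release path in get_command_buffer() above only runs once cb->timeline_point has been reached, which the renderer learns by querying its timeline semaphore. A condensed sketch of that gating using the core Vulkan 1.2 call vkGetSemaphoreCounterValue() follows; the struct and helper names are simplified stand-ins, not the actual wlroots types:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <vulkan/vulkan.h>

/* Simplified stand-in for wlr_vk_command_buffer. */
struct cb_slot {
	VkCommandBuffer vk;
	bool recording;
	uint64_t timeline_point; /* value the GPU signals when this cb finishes */
};

/* Recycle every command buffer the GPU has finished with. The timeline
 * semaphore's counter only moves forward, so any slot whose
 * timeline_point <= current_point is provably complete. */
static bool recycle_completed_cbs(VkDevice dev, VkSemaphore timeline,
		struct cb_slot *slots, size_t len) {
	uint64_t current_point;
	if (vkGetSemaphoreCounterValue(dev, timeline, &current_point) != VK_SUCCESS) {
		return false;
	}
	for (size_t i = 0; i < len; i++) {
		struct cb_slot *cb = &slots[i];
		if (cb->vk != VK_NULL_HANDLE && !cb->recording &&
				cb->timeline_point <= current_point) {
			/* Safe point to call release_command_buffer_resources():
			 * the GPU can no longer read this cb's stage buffers. */
		}
	}
	return true;
}

This is why the patch can move buffers back to renderer->stage.buffers without any extra fence: reaching the timeline point already proves the pending uploads have completed.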