-rw-r--r--  .builds/alpine.yml                   |    3
-rw-r--r--  .builds/archlinux.yml                |    3
-rw-r--r--  .builds/freebsd.yml                  |    3
-rw-r--r--  include/meson.build                  |    3
-rw-r--r--  include/render/vulkan.h              |  312
-rw-r--r--  include/wlr/config.h.in              |    2
-rw-r--r--  include/wlr/render/vulkan.h          |   18
-rw-r--r--  meson.build                          |    1
-rw-r--r--  meson_options.txt                    |    2
-rw-r--r--  render/meson.build                   |    6
-rw-r--r--  render/vulkan/meson.build            |   38
-rw-r--r--  render/vulkan/pixel_format.c         |  325
-rw-r--r--  render/vulkan/renderer.c             | 1540
-rw-r--r--  render/vulkan/shaders/common.vert    |   25
-rw-r--r--  render/vulkan/shaders/meson.build    |   20
-rw-r--r--  render/vulkan/shaders/quad.frag      |   10
-rw-r--r--  render/vulkan/shaders/texture.frag   |   25
-rw-r--r--  render/vulkan/texture.c              |  718
-rw-r--r--  render/vulkan/util.c                 |   93
-rw-r--r--  render/vulkan/vulkan.c               |  550
-rw-r--r--  render/wlr_renderer.c                |    9

21 files changed, 3704 insertions(+), 2 deletions(-)
diff --git a/.builds/alpine.yml b/.builds/alpine.yml
index c636ece6..e4c654b1 100644
--- a/.builds/alpine.yml
+++ b/.builds/alpine.yml
@@ -2,11 +2,14 @@ image: alpine/edge
packages:
- eudev-dev
- ffmpeg-dev
+ - glslang
- libinput-dev
- libxkbcommon-dev
- mesa-dev
- meson
- pixman-dev
+ - vulkan-headers
+ - vulkan-loader-dev
- wayland-dev
- wayland-protocols
- xcb-util-image-dev
diff --git a/.builds/archlinux.yml b/.builds/archlinux.yml
index bbc4874b..1c89ca6f 100644
--- a/.builds/archlinux.yml
+++ b/.builds/archlinux.yml
@@ -15,6 +15,9 @@ packages:
- xcb-util-wm
- xorg-xwayland
- seatd
+ - vulkan-icd-loader
+ - vulkan-headers
+ - glslang
sources:
- https://github.com/swaywm/wlroots
tasks:
diff --git a/.builds/freebsd.yml b/.builds/freebsd.yml
index a570e204..368167c8 100644
--- a/.builds/freebsd.yml
+++ b/.builds/freebsd.yml
@@ -5,9 +5,12 @@ packages:
- devel/libudev-devd
- devel/meson # implies ninja
- devel/pkgconf
+ - graphics/glslang
- graphics/libdrm
- graphics/mesa-libs
- graphics/png
+ - graphics/vulkan-headers
+ - graphics/vulkan-loader
- graphics/wayland
- graphics/wayland-protocols
- multimedia/ffmpeg
diff --git a/include/meson.build b/include/meson.build
index ff951108..3aa5955e 100644
--- a/include/meson.build
+++ b/include/meson.build
@@ -18,6 +18,9 @@ endif
if not features.get('gles2-renderer')
exclude_files += ['render/egl.h', 'render/gles2.h']
endif
+if not features.get('vulkan-renderer')
+ exclude_files += 'render/vulkan.h'
+endif
install_subdir('wlr',
install_dir: get_option('includedir'),
diff --git a/include/render/vulkan.h b/include/render/vulkan.h
new file mode 100644
index 00000000..aa40198c
--- /dev/null
+++ b/include/render/vulkan.h
@@ -0,0 +1,312 @@
+#ifndef RENDER_VULKAN_H
+#define RENDER_VULKAN_H
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+#include <vulkan/vulkan.h>
+#include <wlr/render/wlr_renderer.h>
+#include <wlr/render/wlr_texture.h>
+#include <wlr/render/drm_format_set.h>
+#include <wlr/render/interface.h>
+
+struct wlr_vk_descriptor_pool;
+
+// Central vulkan state that should only be needed once per compositor.
+struct wlr_vk_instance {
+ VkInstance instance;
+ VkDebugUtilsMessengerEXT messenger;
+
+ // enabled extensions
+ size_t extension_count;
+ const char **extensions;
+
+ struct {
+ PFN_vkCreateDebugUtilsMessengerEXT createDebugUtilsMessengerEXT;
+ PFN_vkDestroyDebugUtilsMessengerEXT destroyDebugUtilsMessengerEXT;
+ } api;
+};
+
+// Creates and initializes a vulkan instance.
+// Tries to enable the given extensions but does not fail if they are not
+// available; the caller can check the enabled extensions afterwards.
+// The debug parameter determines whether validation layers are enabled
+// and a debug messenger is created.
+struct wlr_vk_instance *vulkan_instance_create(size_t ext_count,
+ const char **exts, bool debug);
+void vulkan_instance_destroy(struct wlr_vk_instance *ini);
+
+// Logical vulkan device state.
+// Ownership can be shared by multiple renderers, reference counted
+// with `renderers`.
+struct wlr_vk_device {
+ struct wlr_vk_instance *instance;
+
+ VkPhysicalDevice phdev;
+ VkDevice dev;
+
+ int drm_fd;
+
+ // enabled extensions
+ size_t extension_count;
+ const char **extensions;
+
+ // we only ever need one queue for rendering and transfer commands
+ uint32_t queue_family;
+ VkQueue queue;
+
+ struct {
+ PFN_vkGetMemoryFdPropertiesKHR getMemoryFdPropertiesKHR;
+ } api;
+
+ uint32_t format_prop_count;
+ struct wlr_vk_format_props *format_props;
+ struct wlr_drm_format_set dmabuf_render_formats;
+ struct wlr_drm_format_set dmabuf_texture_formats;
+
+ // supported formats for textures (contains only those formats
+ // that support everything we need for textures)
+ uint32_t shm_format_count;
+ uint32_t *shm_formats; // to implement vulkan_get_shm_texture_formats
+};
+
+// Tries to find the VkPhysicalDevice for the given drm fd.
+// Might find none and return VK_NULL_HANDLE.
+VkPhysicalDevice vulkan_find_drm_phdev(struct wlr_vk_instance *ini, int drm_fd);
+
+// Creates a device for the given instance and physical device.
+// Tries to enable the given extensions but does not fail if they are not
+// available; the caller can check the enabled extensions afterwards.
+struct wlr_vk_device *vulkan_device_create(struct wlr_vk_instance *ini,
+ VkPhysicalDevice phdev, size_t ext_count, const char **exts);
+void vulkan_device_destroy(struct wlr_vk_device *dev);
+
+// Tries to find a memory type for the given vulkan device that
+// supports the given flags and is set in req_bits (e.g. if memory
+// type 2 is ok, (req_bits & (1 << 2)) must not be 0).
+// Set req_bits to 0xFFFFFFFF to allow all types.
+int vulkan_find_mem_type(struct wlr_vk_device *device,
+ VkMemoryPropertyFlags flags, uint32_t req_bits);
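+// For example, the renderer uses this to pick a host-visible memory type
+// for staging buffers (see vulkan_get_stage_span in renderer.c):
+//
+//   VkMemoryRequirements mem_reqs;
+//   vkGetBufferMemoryRequirements(r->dev->dev, buf->buffer, &mem_reqs);
+//   mem_info.memoryTypeIndex = vulkan_find_mem_type(r->dev,
+//       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+//       VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, mem_reqs.memoryTypeBits);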
+
+struct wlr_vk_format {
+ uint32_t drm_format;
+ VkFormat vk_format;
+};
+
+// Returns all known format mappings.
+// Not all of them are supported on a given gpu or for a given use case.
+const struct wlr_vk_format *vulkan_get_format_list(size_t *len);
+const struct wlr_vk_format *vulkan_get_format_from_drm(uint32_t drm_format);
+
+struct wlr_vk_format_modifier_props {
+ VkDrmFormatModifierPropertiesEXT props;
+ VkExternalMemoryFeatureFlags dmabuf_flags;
+ VkExtent2D max_extent;
+ bool export_imported;
+};
+
+struct wlr_vk_format_props {
+ struct wlr_vk_format format;
+ VkExtent2D max_extent; // relevant if not created as dma_buf
+ VkFormatFeatureFlags features; // relevant if not created as dma_buf
+
+ uint32_t render_mod_count;
+ struct wlr_vk_format_modifier_props *render_mods;
+
+ uint32_t texture_mod_count;
+ struct wlr_vk_format_modifier_props *texture_mods;
+};
+
+void vulkan_format_props_query(struct wlr_vk_device *dev,
+ const struct wlr_vk_format *format);
+struct wlr_vk_format_modifier_props *vulkan_format_props_find_modifier(
+ struct wlr_vk_format_props *props, uint64_t mod, bool render);
+void vulkan_format_props_finish(struct wlr_vk_format_props *props);
+
+// For each format we want to render, we need a separate renderpass
+// and therefore also separate pipelines.
+struct wlr_vk_render_format_setup {
+ struct wl_list link;
+ VkFormat render_format; // used in renderpass
+ VkRenderPass render_pass;
+
+ VkPipeline tex_pipe;
+ VkPipeline quad_pipe;
+};
+
+// Renderer-internal representation of a wlr_buffer imported for rendering.
+struct wlr_vk_render_buffer {
+ struct wlr_buffer *wlr_buffer;
+ struct wlr_vk_renderer *renderer;
+ struct wlr_vk_render_format_setup *render_setup;
+ struct wl_list link; // wlr_vk_renderer.buffers
+
+ VkImage image;
+ VkImageView image_view;
+ VkFramebuffer framebuffer;
+ uint32_t mem_count;
+ VkDeviceMemory memories[WLR_DMABUF_MAX_PLANES];
+ bool transitioned;
+
+ struct wl_listener buffer_destroy;
+};
+
+// Vulkan wlr_renderer implementation on top of a wlr_vk_device.
+struct wlr_vk_renderer {
+ struct wlr_renderer wlr_renderer;
+ struct wlr_backend *backend;
+ struct wlr_vk_device *dev;
+
+ VkCommandPool command_pool;
+
+ VkShaderModule vert_module;
+ VkShaderModule tex_frag_module;
+ VkShaderModule quad_frag_module;
+
+ VkDescriptorSetLayout ds_layout;
+ VkPipelineLayout pipe_layout;
+ VkSampler sampler;
+
+ VkFence fence;
+
+ struct wlr_vk_render_buffer *current_render_buffer;
+
+ // Current frame id, used in wlr_vk_texture.last_used.
+ // Incremented every time a frame is ended for the renderer.
+ uint32_t frame;
+ VkRect2D scissor; // needed for clearing
+
+ VkCommandBuffer cb;
+ VkPipeline bound_pipe;
+
+ uint32_t render_width;
+ uint32_t render_height;
+ float projection[9];
+
+ size_t last_pool_size;
+ struct wl_list descriptor_pools; // type wlr_vk_descriptor_pool
+ struct wl_list render_format_setups;
+
+ struct wl_list textures; // wlr_vk_texture.link
+ struct wl_list destroy_textures; // wlr_vk_texture to destroy after frame
+ struct wl_list foreign_textures; // wlr_vk_texture to return to foreign queue
+
+ struct wl_list render_buffers; // wlr_vk_render_buffer
+
+ struct {
+ VkCommandBuffer cb;
+ bool recording;
+ struct wl_list buffers; // type wlr_vk_shared_buffer
+ } stage;
+};
+
+// Creates a vulkan renderer for the given device.
+struct wlr_renderer *vulkan_renderer_create_for_device(struct wlr_vk_device *dev);
+
+// stage utility - for uploading/retrieving data
+// Gets a command buffer in recording state which is guaranteed to be
+// executed before the next frame.
+VkCommandBuffer vulkan_record_stage_cb(struct wlr_vk_renderer *renderer);
+
+// Submits the current stage command buffer and waits until it has
+// finished execution.
+bool vulkan_submit_stage_wait(struct wlr_vk_renderer *renderer);
+
+// Suballocates a buffer span with the given size that can be mapped
+// and used as a staging buffer. The allocation is implicitly released
+// when the stage cb has finished execution.
+struct wlr_vk_buffer_span vulkan_get_stage_span(
+ struct wlr_vk_renderer *renderer, VkDeviceSize size);
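+// A rough usage sketch (illustrative; the staging memory is allocated
+// host-visible and host-coherent in renderer.c, so it can be mapped):
+//
+//   struct wlr_vk_buffer_span span = vulkan_get_stage_span(renderer, size);
+//   void *map;
+//   vkMapMemory(renderer->dev->dev, span.buffer->memory, span.alloc.start,
+//       size, 0, &map);
+//   memcpy(map, data, size);
+//   vkUnmapMemory(renderer->dev->dev, span.buffer->memory);
+//   VkCommandBuffer cb = vulkan_record_stage_cb(renderer);
+//   // ... record e.g. vkCmdCopyBufferToImage(cb, span.buffer->buffer, ...)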
+
+// Tries to allocate a texture descriptor set. Will additionally
+// return the pool it was allocated from when successful (for freeing it later).
+struct wlr_vk_descriptor_pool *vulkan_alloc_texture_ds(
+ struct wlr_vk_renderer *renderer, VkDescriptorSet *ds);
+
+// Frees the given descriptor set, returning it to its pool.
+void vulkan_free_ds(struct wlr_vk_renderer *renderer,
+ struct wlr_vk_descriptor_pool *pool, VkDescriptorSet ds);
+struct wlr_vk_format_props *vulkan_format_props_from_drm(
+ struct wlr_vk_device *dev, uint32_t drm_format);
+struct wlr_vk_renderer *vulkan_get_renderer(struct wlr_renderer *r);
+
+// State (e.g. image texture) associated with a surface.
+struct wlr_vk_texture {
+ struct wlr_texture wlr_texture;
+ struct wlr_vk_renderer *renderer;
+ uint32_t mem_count;
+ VkDeviceMemory memories[WLR_DMABUF_MAX_PLANES];
+ VkImage image;
+ VkImageView image_view;
+ const struct wlr_vk_format *format;
+ VkDescriptorSet ds;
+ struct wlr_vk_descriptor_pool *ds_pool;
+ uint32_t last_used; // to track when it can be destroyed
+ bool dmabuf_imported;
+ bool owned; // if dmabuf_imported: whether we have ownership of the image
+ bool transitioned; // if dmabuf_imported: whether we transitioned it away from preinit
+ bool invert_y; // if dmabuf_imported: whether we must flip y
+ struct wl_list foreign_link;
+ struct wl_list destroy_link;
+ struct wl_list link; // wlr_vk_renderer.textures
+
+ // If imported from a wlr_buffer
+ struct wlr_buffer *buffer;
+ struct wl_listener buffer_destroy;
+};
+
+struct wlr_vk_texture *vulkan_get_texture(struct wlr_texture *wlr_texture);
+VkImage vulkan_import_dmabuf(struct wlr_vk_renderer *renderer,
+ const struct wlr_dmabuf_attributes *attribs,
+ VkDeviceMemory mems[static WLR_DMABUF_MAX_PLANES], uint32_t *n_mems,
+ bool for_render);
+struct wlr_texture *vulkan_texture_from_buffer(
+ struct wlr_renderer *wlr_renderer, struct wlr_buffer *buffer);
+void vulkan_texture_destroy(struct wlr_vk_texture *texture);
+
+struct wlr_vk_descriptor_pool {
+ VkDescriptorPool pool;
+ uint32_t free; // number of textures that can be allocated
+ struct wl_list link;
+};
+
+struct wlr_vk_allocation {
+ VkDeviceSize start;
+ VkDeviceSize size;
+};
+
+// One buffer in the list of shared staging buffers.
+// Used to upload to/read from device local images.
+struct wlr_vk_shared_buffer {
+ struct wl_list link;
+ VkBuffer buffer;
+ VkDeviceMemory memory;
+ VkDeviceSize buf_size;
+
+ size_t allocs_size;
+ size_t allocs_capacity;
+ struct wlr_vk_allocation *allocs;
+};
+
+// Suballocated range on a buffer.
+struct wlr_vk_buffer_span {
+ struct wlr_vk_shared_buffer *buffer;
+ struct wlr_vk_allocation alloc;
+};
+
+// util
+bool vulkan_has_extension(size_t count, const char **exts, const char *find);
+const char *vulkan_strerror(VkResult err);
+void vulkan_change_layout(VkCommandBuffer cb, VkImage img,
+ VkImageLayout ol, VkPipelineStageFlags srcs, VkAccessFlags srca,
+ VkImageLayout nl, VkPipelineStageFlags dsts, VkAccessFlags dsta);
+void vulkan_change_layout_queue(VkCommandBuffer cb, VkImage img,
+ VkImageLayout ol, VkPipelineStageFlags srcs, VkAccessFlags srca,
+ VkImageLayout nl, VkPipelineStageFlags dsts, VkAccessFlags dsta,
+ uint32_t src_family, uint32_t dst_family);
+
+#define wlr_vk_error(fmt, res, ...) wlr_log(WLR_ERROR, fmt ": %s (%d)", \
+ vulkan_strerror(res), res, ##__VA_ARGS__)
+
+#endif // RENDER_VULKAN_H
diff --git a/include/wlr/config.h.in b/include/wlr/config.h.in
index f8e55149..71868a34 100644
--- a/include/wlr/config.h.in
+++ b/include/wlr/config.h.in
@@ -7,6 +7,8 @@
#mesondefine WLR_HAS_GLES2_RENDERER
+#mesondefine WLR_HAS_VULKAN_RENDERER
+
#mesondefine WLR_HAS_XWAYLAND
#endif
diff --git a/include/wlr/render/vulkan.h b/include/wlr/render/vulkan.h
new file mode 100644
index 00000000..0302d85b
--- /dev/null
+++ b/include/wlr/render/vulkan.h
@@ -0,0 +1,18 @@
+/*
+ * This is an unstable interface of wlroots. No guarantees are made regarding the
+ * future consistency of this API.
+ */
+#ifndef WLR_USE_UNSTABLE
+#error "Add -DWLR_USE_UNSTABLE to enable unstable wlroots features"
+#endif
+
+#ifndef WLR_RENDER_VULKAN_H
+#define WLR_RENDER_VULKAN_H
+
+#include <wlr/render/wlr_renderer.h>
+
+struct wlr_renderer *wlr_vk_renderer_create_with_drm_fd(int drm_fd);
+bool wlr_texture_is_vk(struct wlr_texture *texture);
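+// Example (illustrative; the render node path is just a placeholder):
+//
+//   int drm_fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
+//   struct wlr_renderer *renderer = wlr_vk_renderer_create_with_drm_fd(drm_fd);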
+
+#endif
+
diff --git a/meson.build b/meson.build
index 3e33c9de..c546b74d 100644
--- a/meson.build
+++ b/meson.build
@@ -91,6 +91,7 @@ features = {
'libinput-backend': false,
'xwayland': false,
'gles2-renderer': false,
+ 'vulkan-renderer': false,
}
internal_features = {
'xcb-errors': false,
diff --git a/meson_options.txt b/meson_options.txt
index 7e4fd7bd..550acbe6 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -2,5 +2,5 @@ option('xcb-errors', type: 'feature', value: 'auto', description: 'Use xcb-error
option('xwayland', type: 'feature', value: 'auto', yield: true, description: 'Enable support for X11 applications')
option('examples', type: 'boolean', value: true, description: 'Build example applications')
option('icon_directory', description: 'Location used to look for cursors (default: ${datadir}/icons)', type: 'string', value: '')
-option('renderers', type: 'array', choices: ['auto', 'gles2'], value: ['auto'], description: 'Select built-in renderers')
+option('renderers', type: 'array', choices: ['auto', 'gles2', 'vulkan'], value: ['auto'], description: 'Select built-in renderers')
option('backends', type: 'array', choices: ['auto', 'drm', 'libinput', 'x11'], value: ['auto'], description: 'Select built-in backends')
diff --git a/render/meson.build b/render/meson.build
index b35ba7c9..2d651887 100644
--- a/render/meson.build
+++ b/render/meson.build
@@ -1,6 +1,6 @@
renderers = get_option('renderers')
if 'auto' in renderers and get_option('auto_features').enabled()
- renderers = ['gles2']
+ renderers = ['gles2', 'vulkan']
elif 'auto' in renderers and get_option('auto_features').disabled()
renderers = []
endif
@@ -24,6 +24,10 @@ if 'gles2' in renderers or 'auto' in renderers
subdir('gles2')
endif
+if 'vulkan' in renderers or 'auto' in renderers
+ subdir('vulkan')
+endif
+
subdir('pixman')
subdir('allocator')
diff --git a/render/vulkan/meson.build b/render/vulkan/meson.build
new file mode 100644
index 00000000..6b9ce840
--- /dev/null
+++ b/render/vulkan/meson.build
@@ -0,0 +1,38 @@
+msg = []
+if 'vulkan' in renderers
+ msg += 'Install "@0@" or remove "vulkan" from "-Drenderers" to disable it.'
+else
+ msg += 'Required for vulkan renderer support.'
+endif
+
+dep_vulkan = dependency('vulkan',
+ version: '>=1.2.182',
+ required: 'vulkan' in renderers,
+ not_found_message: '\n'.join(msg).format('vulkan')
+)
+
+if not dep_vulkan.found()
+ subdir_done()
+endif
+
+glslang = find_program('glslangValidator', native: true, required: false)
+if not glslang.found()
+ if 'vulkan' in renderers
+ error('\n'.join(msg).format('glslang'))
+ else
+ subdir_done()
+ endif
+endif
+
+wlr_files += files(
+ 'renderer.c',
+ 'texture.c',
+ 'vulkan.c',
+ 'util.c',
+ 'pixel_format.c',
+)
+
+wlr_deps += dep_vulkan
+features += { 'vulkan-renderer': true }
+
+subdir('shaders')
diff --git a/render/vulkan/pixel_format.c b/render/vulkan/pixel_format.c
new file mode 100644
index 00000000..cfab5ffe
--- /dev/null
+++ b/render/vulkan/pixel_format.c
@@ -0,0 +1,325 @@
+#include <drm_fourcc.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <vulkan/vulkan.h>
+#include <wlr/util/log.h>
+#include "render/vulkan.h"
+
+// Note: the shm and vulkan format names have reversed endianness.
+static const struct wlr_vk_format formats[] = {
+ {
+ .drm_format = DRM_FORMAT_ARGB8888,
+ .vk_format = VK_FORMAT_B8G8R8A8_SRGB,
+ },
+ {
+ .drm_format = DRM_FORMAT_XRGB8888,
+ .vk_format = VK_FORMAT_B8G8R8A8_SRGB,
+ },
+ {
+ .drm_format = DRM_FORMAT_XBGR8888,
+ .vk_format = VK_FORMAT_R8G8B8A8_SRGB,
+ },
+ {
+ .drm_format = DRM_FORMAT_ABGR8888,
+ .vk_format = VK_FORMAT_R8G8B8A8_SRGB,
+ },
+};
+
+const struct wlr_vk_format *vulkan_get_format_list(size_t *len) {
+ *len = sizeof(formats) / sizeof(formats[0]);
+ return formats;
+}
+
+const struct wlr_vk_format *vulkan_get_format_from_drm(uint32_t drm_format) {
+ for (unsigned i = 0; i < sizeof(formats) / sizeof(formats[0]); ++i) {
+ if (formats[i].drm_format == drm_format) {
+ return &formats[i];
+ }
+ }
+ return NULL;
+}
+
+static const VkImageUsageFlags render_usage =
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+static const VkImageUsageFlags tex_usage =
+ VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+static const VkImageUsageFlags dma_tex_usage =
+ VK_IMAGE_USAGE_SAMPLED_BIT;
+
+static const VkFormatFeatureFlags tex_features =
+ VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
+ VK_FORMAT_FEATURE_TRANSFER_DST_BIT |
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
+ // NOTE: we don't strictly require this, we could create a NEAREST
+ // sampler for formats that need it, in case this ever causes problems.
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
+static const VkFormatFeatureFlags render_features =
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT;
+static const VkFormatFeatureFlags dma_tex_features =
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
+ // NOTE: we don't strictly require this, we could create a NEAREST
+ // sampler for formats that need it, in case this ever causes problems.
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
+
+static bool query_modifier_support(struct wlr_vk_device *dev,
+ struct wlr_vk_format_props *props, size_t modifier_count,
+ VkPhysicalDeviceImageFormatInfo2 fmti) {
+ VkResult res;
+
+ VkFormatProperties2 fmtp = {0};
+ fmtp.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
+
+ VkDrmFormatModifierPropertiesListEXT modp = {0};
+ modp.sType = VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT;
+ modp.drmFormatModifierCount = modifier_count;
+ fmtp.pNext = &modp;
+
+ // The first call to vkGetPhysicalDeviceFormatProperties2 only
+ // retrieved the number of modifiers; we now have to retrieve the
+ // modifier properties themselves.
+ modp.pDrmFormatModifierProperties = calloc(modifier_count,
+ sizeof(*modp.pDrmFormatModifierProperties));
+ if (!modp.pDrmFormatModifierProperties) {
+ wlr_log_errno(WLR_ERROR, "Allocation failed");
+ return false;
+ }
+
+ vkGetPhysicalDeviceFormatProperties2(dev->phdev,
+ props->format.vk_format, &fmtp);
+
+ props->render_mods = calloc(modp.drmFormatModifierCount,
+ sizeof(*props->render_mods));
+ if (!props->render_mods) {
+ wlr_log_errno(WLR_ERROR, "Allocation failed");
+ free(modp.pDrmFormatModifierProperties);
+ return false;
+ }
+
+ props->texture_mods = calloc(modp.drmFormatModifierCount,
+ sizeof(*props->texture_mods));
+ if (!props->texture_mods) {
+ wlr_log_errno(WLR_ERROR, "Allocation failed");
+ free(modp.pDrmFormatModifierProperties);
+ free(props->render_mods);
+ return false;
+ }
+
+ // detailed check
+ // format info, only added if dmabuf/drm_fmt_ext is supported
+ VkPhysicalDeviceExternalImageFormatInfo efmti = {0};
+ efmti.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
+ efmti.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+
+ fmti.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+ fmti.pNext = &efmti;
+
+ VkPhysicalDeviceImageDrmFormatModifierInfoEXT modi = {0};
+ modi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT;
+ modi.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ efmti.pNext = &modi;
+
+ // format properties
+ VkExternalImageFormatProperties efmtp = {0};
+ efmtp.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES;
+
+ VkImageFormatProperties2 ifmtp = {0};
+ ifmtp.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
+ ifmtp.pNext = &efmtp;
+ const VkExternalMemoryProperties *emp = &efmtp.externalMemoryProperties;
+
+ bool found = false;
+
+ for (unsigned i = 0u; i < modp.drmFormatModifierCount; ++i) {
+ VkDrmFormatModifierPropertiesEXT m =
+ modp.pDrmFormatModifierProperties[i];
+ wlr_log(WLR_DEBUG, " modifier: 0x%"PRIx64 ": features 0x%"PRIx32", %d planes",
+ m.drmFormatModifier, m.drmFormatModifierTilingFeatures,
+ m.drmFormatModifierPlaneCount);
+
+ // check that specific modifier for render usage
+ if ((m.drmFormatModifierTilingFeatures & render_features) == render_features) {
+ fmti.usage = render_usage;
+
+ modi.drmFormatModifier = m.drmFormatModifier;
+ res = vkGetPhysicalDeviceImageFormatProperties2(
+ dev->phdev, &fmti, &ifmtp);
+ if (res != VK_SUCCESS) {
+ if (res != VK_ERROR_FORMAT_NOT_SUPPORTED) {
+ wlr_vk_error("vkGetPhysicalDeviceImageFormatProperties2",
+ res);
+ }
+
+ wlr_log(WLR_DEBUG, " >> rendering: format not supported");
+ } else if (emp->externalMemoryFeatures &
+ VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT) {
+ unsigned c = props->render_mod_count;
+ VkExtent3D me = ifmtp.imageFormatProperties.maxExtent;
+ VkExternalMemoryProperties emp = efmtp.externalMemoryProperties;
+ props->render_mods[c].props = m;
+ props->render_mods[c].max_extent.width = me.width;
+ props->render_mods[c].max_extent.height = me.height;
+ props->render_mods[c].dmabuf_flags = emp.externalMemoryFeatures;
+ props->render_mods[c].export_imported =
+ (emp.exportFromImportedHandleTypes &
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
+ ++props->render_mod_count;
+
+ found = true;
+ wlr_drm_format_set_add(&dev->dmabuf_render_formats,
+ props->format.drm_format, m.drmFormatModifier);
+
+ wlr_log(WLR_DEBUG, " >> rendering: supported");
+ } else {
+ wlr_log(WLR_DEBUG, " >> rendering: importing not supported");
+ }
+ } else {
+ wlr_log(WLR_DEBUG, " >> rendering: format features not supported");
+ }
+
+ // check that specific modifier for texture usage
+ if ((m.drmFormatModifierTilingFeatures & dma_tex_features) == dma_tex_features) {
+ fmti.usage = dma_tex_usage;
+
+ modi.drmFormatModifier = m.drmFormatModifier;
+ res = vkGetPhysicalDeviceImageFormatProperties2(
+ dev->phdev, &fmti, &ifmtp);
+ if (res != VK_SUCCESS) {
+ if (res != VK_ERROR_FORMAT_NOT_SUPPORTED) {
+ wlr_vk_error("vkGetPhysicalDeviceImageFormatProperties2",
+ res);
+ }
+
+ wlr_log(WLR_DEBUG, " >> dmatex: format not supported");
+ } else if (emp->externalMemoryFeatures &
+ VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT) {
+ unsigned c = props->texture_mod_count;
+ VkExtent3D me = ifmtp.imageFormatProperties.maxExtent;
+ VkExternalMemoryProperties emp = efmtp.externalMemoryProperties;
+ props->texture_mods[c].props = m;
+ props->texture_mods[c].max_extent.width = me.width;
+ props->texture_mods[c].max_extent.height = me.height;
+ props->texture_mods[c].dmabuf_flags = emp.externalMemoryFeatures;
+ props->texture_mods[c].export_imported =
+ (emp.exportFromImportedHandleTypes &
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
+ ++props->texture_mod_count;
+
+ found = true;
+ wlr_drm_format_set_add(&dev->dmabuf_texture_formats,
+ props->format.drm_format, m.drmFormatModifier);
+
+ wlr_log(WLR_DEBUG, " >> dmatex: supported");
+ } else {
+ wlr_log(WLR_DEBUG, " >> dmatex: importing not supported");
+ }
+ } else {
+ wlr_log(WLR_DEBUG, " >> dmatex: format features not supported");
+ }
+ }
+
+ free(modp.pDrmFormatModifierProperties);
+ return found;
+}
+
+void vulkan_format_props_query(struct wlr_vk_device *dev,
+ const struct wlr_vk_format *format) {
+
+ wlr_log(WLR_DEBUG, "vulkan: Checking support for format %.4s (0x%" PRIx32 ")",
+ (const char *)&format->drm_format, format->drm_format);
+ VkResult res;
+
+ // get general features and modifiers
+ VkFormatProperties2 fmtp = {0};
+ fmtp.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
+
+ VkDrmFormatModifierPropertiesListEXT modp = {0};
+ modp.sType = VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT;
+ fmtp.pNext = &modp;
+
+ vkGetPhysicalDeviceFormatProperties2(dev->phdev,
+ format->vk_format, &fmtp);
+
+ // detailed check
+ VkPhysicalDeviceImageFormatInfo2 fmti = {0};
+ fmti.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
+ fmti.type = VK_IMAGE_TYPE_2D;
+ fmti.format = format->vk_format;
+
+ VkImageFormatProperties2 ifmtp = {0};
+ ifmtp.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
+
+ bool add_fmt_props = false;
+ struct wlr_vk_format_props props = {0};
+ props.format = *format;
+
+ wlr_log(WLR_DEBUG, " drmFormatModifierCount: %d", modp.drmFormatModifierCount);
+ if (modp.drmFormatModifierCount > 0) {
+ add_fmt_props |= query_modifier_support(dev, &props,
+ modp.drmFormatModifierCount, fmti);
+ }
+
+ // non-dmabuf texture properties
+ if (fmtp.formatProperties.optimalTilingFeatures & tex_features) {
+ fmti.pNext = NULL;
+ ifmtp.pNext = NULL;
+ fmti.tiling = VK_IMAGE_TILING_OPTIMAL;
+ fmti.usage = tex_usage;
+
+ res = vkGetPhysicalDeviceImageFormatProperties2(
+ dev->phdev, &fmti, &ifmtp);
+ if (res != VK_SUCCESS) {
+ if (res != VK_ERROR_FORMAT_NOT_SUPPORTED) {
+ wlr_vk_error("vkGetPhysicalDeviceImageFormatProperties2",
+ res);
+ }
+
+ wlr_log(WLR_DEBUG, " >> shmtex: format not supported");
+ } else {
+ VkExtent3D me = ifmtp.imageFormatProperties.maxExtent;
+ props.max_extent.width = me.width;
+ props.max_extent.height = me.height;
+ props.features = fmtp.formatProperties.optimalTilingFeatures;
+
+ wlr_log(WLR_DEBUG, " >> shmtex: supported");
+
+ dev->shm_formats[dev->shm_format_count] = format->drm_format;
+ ++dev->shm_format_count;
+
+ add_fmt_props = true;
+ }
+ } else {
+ wlr_log(WLR_DEBUG, " >> shmtex: format features not supported");
+ }
+
+ if (add_fmt_props) {
+ dev->format_props[dev->format_prop_count] = props;
+ ++dev->format_prop_count;
+ }
+}
+
+void vulkan_format_props_finish(struct wlr_vk_format_props *props) {
+ free(props->texture_mods);
+ free(props->render_mods);
+}
+
+struct wlr_vk_format_modifier_props *vulkan_format_props_find_modifier(
+ struct wlr_vk_format_props *props, uint64_t mod, bool render) {
+ if (render) {
+ for (unsigned i = 0u; i < props->render_mod_count; ++i) {
+ if (props->render_mods[i].props.drmFormatModifier == mod) {
+ return &props->render_mods[i];
+ }
+ }
+ } else {
+ for (unsigned i = 0u; i < props->texture_mod_count; ++i) {
+ if (props->texture_mods[i].props.drmFormatModifier == mod) {
+ return &props->texture_mods[i];
+ }
+ }
+ }
+
+ return NULL;
+}
+
diff --git a/render/vulkan/renderer.c b/render/vulkan/renderer.c
new file mode 100644
index 00000000..4fae81a8
--- /dev/null
+++ b/render/vulkan/renderer.c
@@ -0,0 +1,1540 @@
+#define _POSIX_C_SOURCE 200809L
+#include <assert.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <math.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <drm_fourcc.h>
+#include <vulkan/vulkan.h>
+#include <wlr/render/interface.h>
+#include <wlr/types/wlr_drm.h>
+#include <wlr/types/wlr_matrix.h>
+#include <wlr/util/box.h>
+#include <wlr/util/log.h>
+#include <wlr/render/vulkan.h>
+#include <wlr/backend/interface.h>
+#include <wlr/types/wlr_linux_dmabuf_v1.h>
+
+#include "render/pixel_format.h"
+#include "render/vulkan.h"
+#include "render/vulkan/shaders/common.vert.h"
+#include "render/vulkan/shaders/texture.frag.h"
+#include "render/vulkan/shaders/quad.frag.h"
+#include "types/wlr_buffer.h"
+
+// TODO:
+// - simplify stage allocation, don't track allocations but use ringbuffer-like
+// - use a pipeline cache (not sure when to save though, after every pipeline
+// creation?)
+// - create pipelines as derivatives of each other
+// - evaluate if creating VkDeviceMemory pools is a good idea.
+// We can expect wayland client images to be fairly large (and shouldn't
+// have more than 4k of those I guess) but pooling memory allocations
+// might still be a good idea.
+
+static const VkDeviceSize min_stage_size = 1024 * 1024; // 1MB
+static const VkDeviceSize max_stage_size = 64 * min_stage_size; // 64MB
+static const size_t start_descriptor_pool_size = 256u;
+static bool default_debug = true;
+
+static const struct wlr_renderer_impl renderer_impl;
+
+struct wlr_vk_renderer *vulkan_get_renderer(struct wlr_renderer *wlr_renderer) {
+ assert(wlr_renderer->impl == &renderer_impl);
+ return (struct wlr_vk_renderer *)wlr_renderer;
+}
+
+static struct wlr_vk_render_format_setup *find_or_create_render_setup(
+ struct wlr_vk_renderer *renderer, VkFormat format);
+
+// vertex shader push constant range data
+struct vert_pcr_data {
+ float mat4[4][4];
+ float uv_off[2];
+ float uv_size[2];
+};
+
+// https://www.w3.org/Graphics/Color/srgb
+static float color_to_linear(float non_linear) {
+ return (non_linear > 0.04045) ?
+ pow((non_linear + 0.055) / 1.055, 2.4) :
+ non_linear / 12.92;
+}
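+// E.g. color_to_linear(0.0f) == 0.0f and color_to_linear(1.0f) == 1.0f,
+// while color_to_linear(0.5f) is roughly 0.214f.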
+
+// renderer
+// util
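+// Expands a row-major 3x3 matrix to the 4x4 matrix layout expected by the
+// vertex shader push constants: the translation column (mat3[2], mat3[5])
+// moves into the fourth column of the mat4, rows 2 and 3 stay identity.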
+static void mat3_to_mat4(const float mat3[9], float mat4[4][4]) {
+ memset(mat4, 0, sizeof(float) * 16);
+ mat4[0][0] = mat3[0];
+ mat4[0][1] = mat3[1];
+ mat4[0][3] = mat3[2];
+
+ mat4[1][0] = mat3[3];
+ mat4[1][1] = mat3[4];
+ mat4[1][3] = mat3[5];
+
+ mat4[2][2] = 1.f;
+ mat4[3][3] = 1.f;
+}
+
+struct wlr_vk_descriptor_pool *vulkan_alloc_texture_ds(
+ struct wlr_vk_renderer *renderer, VkDescriptorSet *ds) {
+ VkResult res;
+ VkDescriptorSetAllocateInfo ds_info = {0};
+ ds_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ ds_info.descriptorSetCount = 1;
+ ds_info.pSetLayouts = &renderer->ds_layout;
+
+ bool found = false;
+ struct wlr_vk_descriptor_pool *pool;
+ wl_list_for_each(pool, &renderer->descriptor_pools, link) {
+ if (pool->free > 0) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) { // create new pool
+ pool = calloc(1, sizeof(*pool));
+ if (!pool) {
+ wlr_log_errno(WLR_ERROR, "allocation failed");
+ return NULL;
+ }
+
+ size_t count = renderer->last_pool_size;
+ if (!count) {
+ count = start_descriptor_pool_size;
+ }
+
+ pool->free = count;
+ VkDescriptorPoolSize pool_size = {0};
+ pool_size.descriptorCount = count;
+ pool_size.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+
+ VkDescriptorPoolCreateInfo dpool_info = {0};
+ dpool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ dpool_info.maxSets = count;
+ dpool_info.poolSizeCount = 1;
+ dpool_info.pPoolSizes = &pool_size;
+ dpool_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
+
+ res = vkCreateDescriptorPool(renderer->dev->dev, &dpool_info, NULL,
+ &pool->pool);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkCreateDescriptorPool", res);
+ free(pool);
+ return NULL;
+ }
+
+ wl_list_insert(&renderer->descriptor_pools, &pool->link);
+ }
+
+ ds_info.descriptorPool = pool->pool;
+ res = vkAllocateDescriptorSets(renderer->dev->dev, &ds_info, ds);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkAllocateDescriptorSets", res);
+ return NULL;
+ }
+
+ --pool->free;
+ return pool;
+}
+
+void vulkan_free_ds(struct wlr_vk_renderer *renderer,
+ struct wlr_vk_descriptor_pool *pool, VkDescriptorSet ds) {
+ vkFreeDescriptorSets(renderer->dev->dev, pool->pool, 1, &ds);
+ ++pool->free;
+}
+
+static void destroy_render_format_setup(struct wlr_vk_renderer *renderer,
+ struct wlr_vk_render_format_setup *setup) {
+ if (!setup) {
+ return;
+ }
+
+ VkDevice dev = renderer->dev->dev;
+ vkDestroyRenderPass(dev, setup->render_pass, NULL);
+ vkDestroyPipeline(dev, setup->tex_pipe, NULL);
+ vkDestroyPipeline(dev, setup->quad_pipe, NULL);
+}
+
+static void shared_buffer_destroy(struct wlr_vk_renderer *r,
+ struct wlr_vk_shared_buffer *buffer) {
+ if (!buffer) {
+ return;
+ }
+
+ if (buffer->allocs_size > 0) {
+ wlr_log(WLR_ERROR, "shared_buffer_finish: %d allocations left",
+ (unsigned) buffer->allocs_size);
+ }
+
+ free(buffer->allocs);
+ if (buffer->buffer) {
+ vkDestroyBuffer(r->dev->dev, buffer->buffer, NULL);
+ }
+ if (buffer->memory) {
+ vkFreeMemory(r->dev->dev, buffer->memory, NULL);
+ }
+
+ wl_list_remove(&buffer->link);
+ free(buffer);
+}
+
+static void release_stage_allocations(struct wlr_vk_renderer *renderer) {
+ struct wlr_vk_shared_buffer *buf;
+ wl_list_for_each(buf, &renderer->stage.buffers, link) {
+ buf->allocs_size = 0u;
+ }
+}
+
+struct wlr_vk_buffer_span vulkan_get_stage_span(struct wlr_vk_renderer *r,
+ VkDeviceSize size) {
+ // try to find a free span
+ // simple greedy allocation algorithm - should be enough for this
+ // use case since all allocations are freed together after the frame
+ struct wlr_vk_shared_buffer *buf;
+ wl_list_for_each_reverse(buf, &r->stage.buffers, link) {
+ VkDeviceSize start = 0u;
+ if (buf->allocs_size > 0) {
+ struct wlr_vk_allocation *last = &buf->allocs[buf->allocs_size - 1];
+ start = last->start + last->size;
+ }
+
+ assert(start <= buf->buf_size);
+ if (buf->buf_size - start < size) {
+ continue;
+ }
+
+ ++buf->allocs_size;
+ if (buf->allocs_size > buf->allocs_capacity) {
+ buf->allocs_capacity = buf->allocs_size * 2;
+ void *allocs = realloc(buf->allocs,
+ buf->allocs_capacity * sizeof(*buf->allocs));
+ if (!allocs) {
+ wlr_log_errno(WLR_ERROR, "Allocation failed");
+ goto error_alloc;
+ }
+
+ buf->allocs = allocs;
+ }
+
+ struct wlr_vk_allocation *a = &buf->allocs[buf->allocs_size - 1];
+ a->start = start;
+ a->size = size;
+ return (struct wlr_vk_buffer_span) {
+ .buffer = buf,
+ .alloc = *a,
+ };
+ }
+
+ // we didn't find a free buffer - create one
+ // size = clamp(max(size * 2, prev_size * 2), min_size, max_size)
+ VkDeviceSize bsize = size * 2;
+ bsize = bsize < min_stage_size ? min_stage_size : bsize;
+ if (!wl_list_empty(&r->stage.buffers)) {
+ struct wl_list *last_link = r->stage.buffers.prev;
+ struct wlr_vk_shared_buffer *prev = wl_container_of(
+ last_link, prev, link);
+ VkDeviceSize last_size = 2 * prev->buf_size;
+ bsize = bsize < last_size ? last_size : bsize;
+ }
+
+ if (bsize > max_stage_size) {
+ wlr_log(WLR_INFO, "vulkan stage buffers have reached max size");
+ bsize = max_stage_size;
+ }
+
+ // create buffer
+ buf = calloc(1, sizeof(*buf));
+ if (!buf) {
+ wlr_log_errno(WLR_ERROR, "Allocation failed");
+ goto error_alloc;
+ }
+
+ VkResult res;
+ VkBufferCreateInfo buf_info = {0};
+ buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ buf_info.size = bsize;
+ buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT |
+ VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ res = vkCreateBuffer(r->dev->dev, &buf_info, NULL, &buf->buffer);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkCreateBuffer", res);
+ goto error;
+ }
+
+ VkMemoryRequirements mem_reqs;
+ vkGetBufferMemoryRequirements(r->dev->dev, buf->buffer, &mem_reqs);
+
+ VkMemoryAllocateInfo mem_info = {0};
+ mem_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ mem_info.allocationSize = mem_reqs.size;
+ mem_info.memoryTypeIndex = vulkan_find_mem_type(r->dev,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, mem_reqs.memoryTypeBits);
+ res = vkAllocateMemory(r->dev->dev, &mem_info, NULL, &buf->memory);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkAllocatorMemory", res);
+ goto error;
+ }
+
+ res = vkBindBufferMemory(r->dev->dev, buf->buffer, buf->memory, 0);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkBindBufferMemory", res);
+ goto error;
+ }
+
+ size_t start_count = 8u;
+ buf->allocs = calloc(start_count, sizeof(*buf->allocs));
+ if (!buf->allocs) {
+ wlr_log_errno(WLR_ERROR, "Allocation failed");
+ goto error;
+ }
+
+ wlr_log(WLR_DEBUG, "Created new vk staging buffer of size %" PRIu64, bsize);
+ buf->buf_size = bsize;
+ wl_list_insert(&r->stage.buffers, &buf->link);
+
+ buf->allocs_capacity = start_count;
+ buf->allocs_size = 1u;
+ buf->allocs[0].start = 0u;
+ buf->allocs[0].size = size;
+ return (struct wlr_vk_buffer_span) {
+ .buffer = buf,
+ .alloc = buf->allocs[0],
+ };
+
+error:
+ shared_buffer_destroy(r, buf);
+
+error_alloc:
+ return (struct wlr_vk_buffer_span) {
+ .buffer = NULL,
+ .alloc = (struct wlr_vk_allocation) {0, 0},
+ };
+}
+
+VkCommandBuffer vulkan_record_stage_cb(struct wlr_vk_renderer *renderer) {
+ if (!renderer->stage.recording) {
+ VkCommandBufferBeginInfo begin_info = {0};
+ begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ vkBeginCommandBuffer(renderer->stage.cb, &begin_info);
+ renderer->stage.recording = true;
+ }
+
+ return renderer->stage.cb;
+}
+
+bool vulkan_submit_stage_wait(struct wlr_vk_renderer *renderer) {
+ if (!renderer->stage.recording) {
+ return false;
+ }
+
+ vkEndCommandBuffer(renderer->stage.cb);
+ renderer->stage.recording = false;
+
+ VkSubmitInfo submit_info = {0};
+ submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submit_info.commandBufferCount = 1u;
+ submit_info.pCommandBuffers = &renderer->stage.cb;
+ VkResult res = vkQueueSubmit(renderer->dev->queue, 1,
+ &submit_info, renderer->fence);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkQueueSubmit", res);
+ return false;
+ }
+
+ res = vkWaitForFences(renderer->dev->dev, 1, &renderer->fence, true,
+ UINT64_MAX);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkWaitForFences", res);
+ return false;
+ }
+
+ // NOTE: don't release stage allocations here since they may still be
+ // used for reading. Will be done next frame.
+ res = vkResetFences(renderer->dev->dev, 1, &renderer->fence);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkResetFences", res);
+ return false;
+ }
+
+ return true;
+}
+
+struct wlr_vk_format_props *vulkan_format_props_from_drm(
+ struct wlr_vk_device *dev, uint32_t drm_fmt) {
+ for (size_t i = 0u; i < dev->format_prop_count; ++i) {
+ if (dev->format_props[i].format.drm_format == drm_fmt) {
+ return &dev->format_props[i];
+ }
+ }
+ return NULL;
+}
+
+// buffer import
+static void destroy_render_buffer(struct wlr_vk_render_buffer *buffer) {
+ wl_list_remove(&buffer->link);
+ wl_list_remove(&buffer->buffer_destroy.link);
+
+ assert(buffer->renderer->current_render_buffer != buffer);
+
+ VkDevice dev = buffer->renderer->dev->dev;
+
+ vkDestroyFramebuffer(dev, buffer->framebuffer, NULL);
+ vkDestroyImageView(dev, buffer->image_view, NULL);
+ vkDestroyImage(dev, buffer->image, NULL);
+
+ for (size_t i = 0u; i < buffer->mem_count; ++i) {
+ vkFreeMemory(dev, buffer->memories[i], NULL);
+ }
+
+ free(buffer);
+}
+
+static struct wlr_vk_render_buffer *get_render_buffer(
+ struct wlr_vk_renderer *renderer, struct wlr_buffer *wlr_buffer) {
+ struct wlr_vk_render_buffer *buffer;
+ wl_list_for_each(buffer, &renderer->render_buffers, link) {
+ if (buffer->wlr_buffer == wlr_buffer) {
+ return buffer;
+ }
+ }
+ return NULL;
+}
+
+static void handle_render_buffer_destroy(struct wl_listener *listener, void *data) {
+ struct wlr_vk_render_buffer *buffer =
+ wl_container_of(listener, buffer, buffer_destroy);
+ destroy_render_buffer(buffer);
+}
+
+static struct wlr_vk_render_buffer *create_render_buffer(
+ struct wlr_vk_renderer *renderer, struct wlr_buffer *wlr_buffer) {
+ VkResult res;
+
+ struct wlr_vk_render_buffer *buffer = calloc(1, sizeof(*buffer));
+ if (buffer == NULL) {
+ wlr_log_errno(WLR_ERROR, "Allocation failed");
+ return NULL;
+ }
+ buffer->wlr_buffer = wlr_buffer;
+ buffer->renderer = renderer;
+
+ struct wlr_dmabuf_attributes dmabuf = {0};
+ if (!wlr_buffer_get_dmabuf(wlr_buffer, &dmabuf)) {
+ goto error_buffer;
+ }
+
+ wlr_log(WLR_DEBUG, "vulkan create_render_buffer: %.4s, %dx%d",
+ (const char*) &dmabuf.format, dmabuf.width, dmabuf.height);
+
+ // NOTE: we could at least support WLR_DMABUF_ATTRIBUTES_FLAGS_Y_INVERT
+ // if it is needed by anyone. Can be implemented using negative viewport
+ // height or flipping matrix.
+ if (dmabuf.flags != 0) {
+ wlr_log(WLR_ERROR, "dmabuf flags %x not supported/implemented on vulkan",
+ dmabuf.flags);
+ goto error_buffer;
+ }
+
+ buffer->image = vulkan_import_dmabuf(renderer, &dmabuf,
+ buffer->memories, &buffer->mem_count, true);
+ if (!buffer->image) {
+ goto error_buffer;
+ }
+
+ VkDevice dev = renderer->dev->dev;
+ const struct wlr_vk_format_props *fmt = vulkan_format_props_from_drm(
+ renderer->dev, dmabuf.format);
+ if (fmt == NULL) {
+ wlr_log(WLR_ERROR, "Unsupported pixel format %"PRIx32 " (%.4s)",
+ dmabuf.format, (const char*) &dmabuf.format);
+ goto error_buffer;
+ }
+
+ VkImageViewCreateInfo view_info = {0};
+ view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ view_info.image = buffer->image;
+ view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ view_info.format = fmt->format.vk_format;
+ view_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_info.subresourceRange = (VkImageSubresourceRange) {
+ VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1
+ };
+
+ res = vkCreateImageView(dev, &view_info, NULL, &buffer->image_view);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkCreateImageView failed", res);
+ goto error_view;
+ }
+
+ buffer->render_setup = find_or_create_render_setup(
+ renderer, fmt->format.vk_format);
+ if (!buffer->render_setup) {
+ goto error_view;
+ }
+
+ VkFramebufferCreateInfo fb_info = {0};
+ fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ fb_info.attachmentCount = 1u;
+ fb_info.pAttachments = &buffer->image_view;
+ fb_info.flags = 0u;
+ fb_info.width = dmabuf.width;
+ fb_info.height = dmabuf.height;
+ fb_info.layers = 1u;
+ fb_info.renderPass = buffer->render_setup->render_pass;
+
+ res = vkCreateFramebuffer(dev, &fb_info, NULL, &buffer->framebuffer);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkCreateFramebuffer", res);
+ goto error_view;
+ }
+
+ buffer->buffer_destroy.notify = handle_render_buffer_destroy;
+ wl_signal_add(&wlr_buffer->events.destroy, &buffer->buffer_destroy);
+ wl_list_insert(&renderer->render_buffers, &buffer->link);
+
+ return buffer;
+
+error_view:
+ vkDestroyFramebuffer(dev, buffer->framebuffer, NULL);
+ vkDestroyImageView(dev, buffer->image_view, NULL);
+ vkDestroyImage(dev, buffer->image, NULL);
+ for (size_t i = 0u; i < buffer->mem_count; ++i) {
+ vkFreeMemory(dev, buffer->memories[i], NULL);
+ }
+error_buffer:
+ wlr_dmabuf_attributes_finish(&dmabuf);
+ free(buffer);
+ return NULL;
+}
+
+// interface implementation
+static bool vulkan_bind_buffer(struct wlr_renderer *wlr_renderer,
+ struct wlr_buffer *wlr_buffer) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+
+ if (renderer->current_render_buffer) {
+ wlr_buffer_unlock(renderer->current_render_buffer->wlr_buffer);
+ renderer->current_render_buffer = NULL;
+ }
+
+ if (!wlr_buffer) {
+ return true;
+ }
+
+ struct wlr_vk_render_buffer *buffer = get_render_buffer(renderer, wlr_buffer);
+ if (!buffer) {
+ buffer = create_render_buffer(renderer, wlr_buffer);
+ if (!buffer) {
+ return false;
+ }
+ }
+
+ wlr_buffer_lock(wlr_buffer);
+ renderer->current_render_buffer = buffer;
+ return true;
+}
+
+static void vulkan_begin(struct wlr_renderer *wlr_renderer,
+ uint32_t width, uint32_t height) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+ assert(renderer->current_render_buffer);
+
+ VkCommandBuffer cb = renderer->cb;
+ VkCommandBufferBeginInfo begin_info = {0};
+ begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ vkBeginCommandBuffer(cb, &begin_info);
+
+ // begin render pass
+ VkFramebuffer fb = renderer->current_render_buffer->framebuffer;
+
+ VkRect2D rect = {{0, 0}, {width, height}};
+ renderer->scissor = rect;
+
+ VkRenderPassBeginInfo rp_info = {0};
+ rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ rp_info.renderArea = rect;
+ rp_info.renderPass = renderer->current_render_buffer->render_setup->render_pass;
+ rp_info.framebuffer = fb;
+ rp_info.clearValueCount = 0;
+ vkCmdBeginRenderPass(cb, &rp_info, VK_SUBPASS_CONTENTS_INLINE);
+
+ VkViewport vp = {0.f, 0.f, (float) width, (float) height, 0.f, 1.f};
+ vkCmdSetViewport(cb, 0, 1, &vp);
+ vkCmdSetScissor(cb, 0, 1, &rect);
+
+ // Refresh projection matrix.
+ // wlr_matrix_projection assumes a GL coordinate system, so we need
+ // to pass WL_OUTPUT_TRANSFORM_FLIPPED_180 to adjust it for vulkan.
+ wlr_matrix_projection(renderer->projection, width, height,
+ WL_OUTPUT_TRANSFORM_FLIPPED_180);
+
+ renderer->render_width = width;
+ renderer->render_height = height;
+ renderer->bound_pipe = VK_NULL_HANDLE;
+}
+
+static void vulkan_end(struct wlr_renderer *wlr_renderer) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+ assert(renderer->current_render_buffer);
+
+ VkCommandBuffer render_cb = renderer->cb;
+ VkCommandBuffer pre_cb = vulkan_record_stage_cb(renderer);
+
+ renderer->render_width = 0u;
+ renderer->render_height = 0u;
+ renderer->bound_pipe = VK_NULL_HANDLE;
+
+ vkCmdEndRenderPass(render_cb);
+
+ // insert acquire and release barriers for dmabuf-images
+ unsigned barrier_count = wl_list_length(&renderer->foreign_textures) + 1;
+ VkImageMemoryBarrier *acquire_barriers = calloc(barrier_count, sizeof(VkImageMemoryBarrier));
+ VkImageMemoryBarrier *release_barriers = calloc(barrier_count, sizeof(VkImageMemoryBarrier));
+ if (!acquire_barriers || !release_barriers) {
+ wlr_log_errno(WLR_ERROR, "Allocation failed");
+ free(acquire_barriers);
+ free(release_barriers);
+ return;
+ }
+
+ struct wlr_vk_texture *texture, *tmp_tex;
+ unsigned idx = 0;
+
+ wl_list_for_each_safe(texture, tmp_tex, &renderer->foreign_textures, foreign_link) {
+ VkImageLayout src_layout = VK_IMAGE_LAYOUT_GENERAL;
+ if (!texture->transitioned) {
+ src_layout = VK_IMAGE_LAYOUT_PREINITIALIZED;
+ texture->transitioned = true;
+ }
+
+ // acquire
+ acquire_barriers[idx].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ acquire_barriers[idx].srcQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT;
+ acquire_barriers[idx].dstQueueFamilyIndex = renderer->dev->queue_family;
+ acquire_barriers[idx].image = texture->image;
+ acquire_barriers[idx].oldLayout = src_layout;
+ acquire_barriers[idx].newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ acquire_barriers[idx].srcAccessMask = 0u; // ignored anyway
+ acquire_barriers[idx].dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ acquire_barriers[idx].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ acquire_barriers[idx].subresourceRange.layerCount = 1;
+ acquire_barriers[idx].subresourceRange.levelCount = 1;
+
+ // release
+ release_barriers[idx].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ release_barriers[idx].srcQueueFamilyIndex = renderer->dev->queue_family;
+ release_barriers[idx].dstQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT;
+ release_barriers[idx].image = texture->image;
+ release_barriers[idx].oldLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ release_barriers[idx].newLayout = VK_IMAGE_LAYOUT_GENERAL;
+ release_barriers[idx].srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ release_barriers[idx].dstAccessMask = 0u; // ignored anyway
+ release_barriers[idx].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ release_barriers[idx].subresourceRange.layerCount = 1;
+ release_barriers[idx].subresourceRange.levelCount = 1;
+ ++idx;
+
+ wl_list_remove(&texture->foreign_link);
+ texture->owned = false;
+ }
+
+ // also add acquire/release barriers for the current render buffer
+ VkImageLayout src_layout = VK_IMAGE_LAYOUT_GENERAL;
+ if (!renderer->current_render_buffer->transitioned) {
+ src_layout = VK_IMAGE_LAYOUT_PREINITIALIZED;
+ renderer->current_render_buffer->transitioned = true;
+ }
+
+ // acquire render buffer before rendering
+ acquire_barriers[idx].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ acquire_barriers[idx].srcQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT;
+ acquire_barriers[idx].dstQueueFamilyIndex = renderer->dev->queue_family;
+ acquire_barriers[idx].image = renderer->current_render_buffer->image;
+ acquire_barriers[idx].oldLayout = src_layout;
+ acquire_barriers[idx].newLayout = VK_IMAGE_LAYOUT_GENERAL;
+ acquire_barriers[idx].srcAccessMask = 0u; // ignored anyway
+ acquire_barriers[idx].dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ acquire_barriers[idx].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ acquire_barriers[idx].subresourceRange.layerCount = 1;
+ acquire_barriers[idx].subresourceRange.levelCount = 1;
+
+ // release render buffer after rendering
+ release_barriers[idx].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ release_barriers[idx].srcQueueFamilyIndex = renderer->dev->queue_family;
+ release_barriers[idx].dstQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT;
+ release_barriers[idx].image = renderer->current_render_buffer->image;
+ release_barriers[idx].oldLayout = VK_IMAGE_LAYOUT_GENERAL;
+ release_barriers[idx].newLayout = VK_IMAGE_LAYOUT_GENERAL;
+ release_barriers[idx].srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ release_barriers[idx].dstAccessMask = 0u; // ignored anyway
+ release_barriers[idx].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ release_barriers[idx].subresourceRange.layerCount = 1;
+ release_barriers[idx].subresourceRange.levelCount = 1;
+ ++idx;
+
+ vkCmdPipelineBarrier(pre_cb, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+ 0, 0, NULL, 0, NULL, barrier_count, acquire_barriers);
+
+ vkCmdPipelineBarrier(render_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL,
+ barrier_count, release_barriers);
+
+ free(acquire_barriers);
+ free(release_barriers);
+
+ vkEndCommandBuffer(renderer->cb);
+
+ unsigned submit_count = 0u;
+ VkSubmitInfo submit_infos[2] = {0};
+
+ // No semaphores needed here.
+ // We don't need a semaphore from the stage/transfer submission
+ // to the render submissions since they are on the same queue
+ // and we have a renderpass dependency for that.
+ if (renderer->stage.recording) {
+ vkEndCommandBuffer(renderer->stage.cb);
+ renderer->stage.recording = false;
+
+ VkSubmitInfo *stage_sub = &submit_infos[submit_count];
+ stage_sub->sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ stage_sub->commandBufferCount = 1u;
+ stage_sub->pCommandBuffers = &pre_cb;
+ ++submit_count;
+ }
+
+ VkSubmitInfo *render_sub = &submit_infos[submit_count];
+ render_sub->sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ render_sub->pCommandBuffers = &render_cb;
+ render_sub->commandBufferCount = 1u;
+ ++submit_count;
+
+ VkResult res = vkQueueSubmit(renderer->dev->queue, submit_count,
+ submit_infos, renderer->fence);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkQueueSubmit", res);
+ return;
+ }
+
+ // Sadly this is required due to the current api/rendering model of
+ // wlroots. Ideally we could use gpu and cpu in parallel (_without_
+ // the implicit synchronization overhead and mess of opengl drivers).
+ res = vkWaitForFences(renderer->dev->dev, 1, &renderer->fence, true,
+ UINT64_MAX);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkWaitForFences", res);
+ return;
+ }
+
+ ++renderer->frame;
+ release_stage_allocations(renderer);
+
+ // destroy pending textures
+ wl_list_for_each_safe(texture, tmp_tex, &renderer->destroy_textures, destroy_link) {
+ wlr_texture_destroy(&texture->wlr_texture);
+ }
+
+ wl_list_init(&renderer->destroy_textures); // reset the list
+ res = vkResetFences(renderer->dev->dev, 1, &renderer->fence);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkResetFences", res);
+ return;
+ }
+}
+
+static bool vulkan_render_subtexture_with_matrix(struct wlr_renderer *wlr_renderer,
+ struct wlr_texture *wlr_texture, const struct wlr_fbox *box,
+ const float matrix[static 9], float alpha) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+ VkCommandBuffer cb = renderer->cb;
+
+ struct wlr_vk_texture *texture = vulkan_get_texture(wlr_texture);
+ assert(texture->renderer == renderer);
+ if (texture->dmabuf_imported && !texture->owned) {
+ // Store this texture in the list of textures that need to be
+ // acquired before rendering and released after rendering.
+ // We don't do it here immediately since barriers inside
+ // a renderpass are suboptimal (would require additional renderpass
+ // dependency and potentially multiple barriers) and it's
+ // better to issue one barrier for all used textures anyway.
+ texture->owned = true;
+ assert(texture->foreign_link.prev == NULL);
+ assert(texture->foreign_link.next == NULL);
+ wl_list_insert(&renderer->foreign_textures, &texture->foreign_link);
+ }
+
+ VkPipeline pipe = renderer->current_render_buffer->render_setup->tex_pipe;
+ if (pipe != renderer->bound_pipe) {
+ vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipe);
+ renderer->bound_pipe = pipe;
+ }
+
+ vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ renderer->pipe_layout, 0, 1, &texture->ds, 0, NULL);
+
+ float final_matrix[9];
+ wlr_matrix_multiply(final_matrix, renderer->projection, matrix);
+
+ struct vert_pcr_data vert_pcr_data;
+ mat3_to_mat4(final_matrix, vert_pcr_data.mat4);
+
+ vert_pcr_data.uv_off[0] = box->x / wlr_texture->width;
+ vert_pcr_data.uv_off[1] = box->y / wlr_texture->height;
+ vert_pcr_data.uv_size[0] = box->width / wlr_texture->width;
+ vert_pcr_data.uv_size[1] = box->height / wlr_texture->height;
+
+ if (texture->invert_y) {
+ vert_pcr_data.uv_off[1] += vert_pcr_data.uv_size[1];
+ vert_pcr_data.uv_size[1] = -vert_pcr_data.uv_size[1];
+ }
+
+ // When the texture itself does not have alpha information we want
+ // to ignore the sampled value and just use the alpha passed here;
+ // we pass a negative value to communicate that.
+ // See the texture.frag shader for more details.
+ const struct wlr_pixel_format_info *format_info = drm_get_pixel_format_info(
+ texture->format->drm_format);
+ assert(format_info);
+ if (!format_info->has_alpha) {
+ alpha *= -1;
+ }
+
+ vkCmdPushConstants(cb, renderer->pipe_layout,
+ VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(vert_pcr_data), &vert_pcr_data);
+ vkCmdPushConstants(cb, renderer->pipe_layout,
+ VK_SHADER_STAGE_FRAGMENT_BIT, sizeof(vert_pcr_data), sizeof(float),
+ &alpha);
+ vkCmdDraw(cb, 4, 1, 0, 0);
+ texture->last_used = renderer->frame;
+
+ return true;
+}
+
+static void vulkan_clear(struct wlr_renderer *wlr_renderer,
+ const float color[static 4]) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+ VkCommandBuffer cb = renderer->cb;
+
+ VkClearAttachment att = {0};
+ att.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ att.colorAttachment = 0u;
+
+ // Input color values are given in srgb space, vulkan expects
+ // them in linear space. We explicitly import argb8 render buffers
+ // as srgb, so vulkan will convert the linear values we give here
+ // back to srgb when writing to the attachment.
+ // But in other parts of wlroots we just always assume
+ // srgb so that's why we have to convert here.
+ att.clearValue.color.float32[0] = color_to_linear(color[0]);
+ att.clearValue.color.float32[1] = color_to_linear(color[1]);
+ att.clearValue.color.float32[2] = color_to_linear(color[2]);
+ att.clearValue.color.float32[3] = color[3]; // no conversion for alpha
+
+ VkClearRect rect = {0};
+ rect.rect = renderer->scissor;
+ rect.layerCount = 1;
+ vkCmdClearAttachments(cb, 1, &att, 1, &rect);
+}
+
+static void vulkan_scissor(struct wlr_renderer *wlr_renderer,
+ struct wlr_box *box) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+ VkCommandBuffer cb = renderer->cb;
+
+ uint32_t w = renderer->render_width;
+ uint32_t h = renderer->render_height;
+ struct wlr_box dst = {0, 0, w, h};
+ if (box && !wlr_box_intersection(&dst, box, &dst)) {
+ dst = (struct wlr_box) {0, 0, 0, 0}; // empty
+ }
+
+ VkRect2D rect = (VkRect2D) {{dst.x, dst.y}, {dst.width, dst.height}};
+ renderer->scissor = rect;
+ vkCmdSetScissor(cb, 0, 1, &rect);
+}
+
+static const uint32_t *vulkan_get_shm_texture_formats(
+ struct wlr_renderer *wlr_renderer, size_t *len) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+ *len = renderer->dev->shm_format_count;
+ return renderer->dev->shm_formats;
+}
+
+static void vulkan_render_quad_with_matrix(struct wlr_renderer *wlr_renderer,
+ const float color[static 4], const float matrix[static 9]) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+ VkCommandBuffer cb = renderer->cb;
+
+ VkPipeline pipe = renderer->current_render_buffer->render_setup->quad_pipe;
+ if (pipe != renderer->bound_pipe) {
+ vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipe);
+ renderer->bound_pipe = pipe;
+ }
+
+ float final_matrix[9];
+ wlr_matrix_multiply(final_matrix, renderer->projection, matrix);
+
+ struct vert_pcr_data vert_pcr_data;
+ mat3_to_mat4(final_matrix, vert_pcr_data.mat4);
+ vert_pcr_data.uv_off[0] = 0.f;
+ vert_pcr_data.uv_off[1] = 0.f;
+ vert_pcr_data.uv_size[0] = 1.f;
+ vert_pcr_data.uv_size[1] = 1.f;
+
+	// Input color values are given in srgb space, but the shader expects
+	// them in linear space: it does all computation in linear space and
+	// outputs linear colors as well (vulkan then automatically converts
+	// them for our srgb render targets).
+	// The rest of wlroots always assumes srgb, which is why we have
+	// to convert here.
+ float linear_color[4];
+ linear_color[0] = color_to_linear(color[0]);
+ linear_color[1] = color_to_linear(color[1]);
+ linear_color[2] = color_to_linear(color[2]);
+ linear_color[3] = color[3]; // no conversion for alpha
+
+ vkCmdPushConstants(cb, renderer->pipe_layout,
+ VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(vert_pcr_data), &vert_pcr_data);
+ vkCmdPushConstants(cb, renderer->pipe_layout,
+ VK_SHADER_STAGE_FRAGMENT_BIT, sizeof(vert_pcr_data), sizeof(float) * 4,
+ linear_color);
+ vkCmdDraw(cb, 4, 1, 0, 0);
+}
+
+static const struct wlr_drm_format_set *vulkan_get_dmabuf_texture_formats(
+ struct wlr_renderer *wlr_renderer) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+ return &renderer->dev->dmabuf_texture_formats;
+}
+
+static const struct wlr_drm_format_set *vulkan_get_render_formats(
+ struct wlr_renderer *wlr_renderer) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+ return &renderer->dev->dmabuf_render_formats;
+}
+
+static uint32_t vulkan_preferred_read_format(
+ struct wlr_renderer *wlr_renderer) {
+ // TODO: implement!
+ wlr_log(WLR_ERROR, "vulkan_preferred_read_format not implemented");
+ return DRM_FORMAT_XBGR8888;
+}
+
+static void vulkan_destroy(struct wlr_renderer *wlr_renderer) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+ struct wlr_vk_device *dev = renderer->dev;
+ if (!dev) {
+ free(renderer);
+ return;
+ }
+
+ assert(!renderer->current_render_buffer);
+
+ // stage.cb automatically freed with command pool
+ struct wlr_vk_shared_buffer *buf, *tmp_buf;
+ wl_list_for_each_safe(buf, tmp_buf, &renderer->stage.buffers, link) {
+ shared_buffer_destroy(renderer, buf);
+ }
+
+ struct wlr_vk_texture *tex, *tex_tmp;
+ wl_list_for_each_safe(tex, tex_tmp, &renderer->textures, link) {
+ vulkan_texture_destroy(tex);
+ }
+
+ struct wlr_vk_render_buffer *render_buffer, *render_buffer_tmp;
+ wl_list_for_each_safe(render_buffer, render_buffer_tmp,
+ &renderer->render_buffers, link) {
+ destroy_render_buffer(render_buffer);
+ }
+
+ struct wlr_vk_render_format_setup *setup, *tmp_setup;
+ wl_list_for_each_safe(setup, tmp_setup,
+ &renderer->render_format_setups, link) {
+ destroy_render_format_setup(renderer, setup);
+ }
+
+ struct wlr_vk_descriptor_pool *pool, *tmp_pool;
+ wl_list_for_each_safe(pool, tmp_pool, &renderer->descriptor_pools, link) {
+ vkDestroyDescriptorPool(dev->dev, pool->pool, NULL);
+ free(pool);
+ }
+
+ vkDestroyShaderModule(dev->dev, renderer->vert_module, NULL);
+ vkDestroyShaderModule(dev->dev, renderer->tex_frag_module, NULL);
+ vkDestroyShaderModule(dev->dev, renderer->quad_frag_module, NULL);
+
+ vkDestroyFence(dev->dev, renderer->fence, NULL);
+ vkDestroyPipelineLayout(dev->dev, renderer->pipe_layout, NULL);
+ vkDestroyDescriptorSetLayout(dev->dev, renderer->ds_layout, NULL);
+ vkDestroySampler(dev->dev, renderer->sampler, NULL);
+ vkDestroyCommandPool(dev->dev, renderer->command_pool, NULL);
+
+ struct wlr_vk_instance *ini = dev->instance;
+ vulkan_device_destroy(dev);
+ vulkan_instance_destroy(ini);
+ free(renderer);
+}
+
+static bool vulkan_read_pixels(struct wlr_renderer *wlr_renderer,
+ uint32_t drm_format, uint32_t *flags, uint32_t stride,
+ uint32_t width, uint32_t height, uint32_t src_x, uint32_t src_y,
+ uint32_t dst_x, uint32_t dst_y, void *data) {
+ // TODO: implement!
+ wlr_log(WLR_ERROR, "vulkan_read_pixels not implemented");
+ return false;
+}
+
+static int vulkan_get_drm_fd(struct wlr_renderer *wlr_renderer) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+ return renderer->dev->drm_fd;
+}
+
+static uint32_t vulkan_get_render_buffer_caps(struct wlr_renderer *wlr_renderer) {
+ return WLR_BUFFER_CAP_DMABUF;
+}
+
+static const struct wlr_renderer_impl renderer_impl = {
+ .bind_buffer = vulkan_bind_buffer,
+ .begin = vulkan_begin,
+ .end = vulkan_end,
+ .clear = vulkan_clear,
+ .scissor = vulkan_scissor,
+ .render_subtexture_with_matrix = vulkan_render_subtexture_with_matrix,
+ .render_quad_with_matrix = vulkan_render_quad_with_matrix,
+ .get_shm_texture_formats = vulkan_get_shm_texture_formats,
+ .get_dmabuf_texture_formats = vulkan_get_dmabuf_texture_formats,
+ .get_render_formats = vulkan_get_render_formats,
+ .preferred_read_format = vulkan_preferred_read_format,
+ .read_pixels = vulkan_read_pixels,
+ .destroy = vulkan_destroy,
+ .get_drm_fd = vulkan_get_drm_fd,
+ .get_render_buffer_caps = vulkan_get_render_buffer_caps,
+ .texture_from_buffer = vulkan_texture_from_buffer,
+};
+
+// Initializes the VkDescriptorSetLayout and VkPipelineLayout needed
+// for the texture rendering pipeline using the given VkSampler.
+static bool init_tex_layouts(struct wlr_vk_renderer *renderer,
+ VkSampler tex_sampler, VkDescriptorSetLayout *out_ds_layout,
+ VkPipelineLayout *out_pipe_layout) {
+ VkResult res;
+ VkDevice dev = renderer->dev->dev;
+
+ // layouts
+ // descriptor set
+ VkDescriptorSetLayoutBinding ds_bindings[1] = {{
+ 0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1,
+ VK_SHADER_STAGE_FRAGMENT_BIT, &tex_sampler,
+ }};
+
+ VkDescriptorSetLayoutCreateInfo ds_info = {0};
+ ds_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ ds_info.bindingCount = 1;
+ ds_info.pBindings = ds_bindings;
+
+ res = vkCreateDescriptorSetLayout(dev, &ds_info, NULL, out_ds_layout);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkCreateDescriptorSetLayout", res);
+ return false;
+ }
+
+ // pipeline layout
+ VkPushConstantRange pc_ranges[2] = {0};
+ pc_ranges[0].size = sizeof(struct vert_pcr_data);
+ pc_ranges[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
+
+ pc_ranges[1].offset = pc_ranges[0].size;
+ pc_ranges[1].size = sizeof(float) * 4; // alpha or color
+ pc_ranges[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
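+	// offset math: vert_pcr_data is a row-major mat4 (64 bytes) plus two
+	// vec2s (16 bytes), 80 bytes in total; this matches the
+	// layout(offset = 80) declarations in the fragment shaders below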
+
+ VkPipelineLayoutCreateInfo pl_info = {0};
+ pl_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ pl_info.setLayoutCount = 1;
+ pl_info.pSetLayouts = out_ds_layout;
+ pl_info.pushConstantRangeCount = 2;
+ pl_info.pPushConstantRanges = pc_ranges;
+
+ res = vkCreatePipelineLayout(dev, &pl_info, NULL, out_pipe_layout);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkCreatePipelineLayout", res);
+ return false;
+ }
+
+ return true;
+}
+
+// Initializes the pipeline for rendering textures and using the given
+// VkRenderPass and VkPipelineLayout.
+static bool init_tex_pipeline(struct wlr_vk_renderer *renderer,
+ VkRenderPass rp, VkPipelineLayout pipe_layout, VkPipeline *pipe) {
+ VkResult res;
+ VkDevice dev = renderer->dev->dev;
+
+ // shaders
+ VkPipelineShaderStageCreateInfo vert_stage = {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ NULL, 0, VK_SHADER_STAGE_VERTEX_BIT, renderer->vert_module,
+ "main", NULL
+ };
+
+ VkPipelineShaderStageCreateInfo tex_stages[2] = {vert_stage, {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ NULL, 0, VK_SHADER_STAGE_FRAGMENT_BIT, renderer->tex_frag_module,
+ "main", NULL
+ }};
+
+ // info
+ VkPipelineInputAssemblyStateCreateInfo assembly = {0};
+ assembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ assembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
+
+ VkPipelineRasterizationStateCreateInfo rasterization = {0};
+ rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+ rasterization.polygonMode = VK_POLYGON_MODE_FILL;
+ rasterization.cullMode = VK_CULL_MODE_NONE;
+ rasterization.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
+ rasterization.lineWidth = 1.f;
+
+ VkPipelineColorBlendAttachmentState blend_attachment = {0};
+ blend_attachment.blendEnable = true;
+ // we generally work with pre-multiplied alpha
+ blend_attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
+ blend_attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ blend_attachment.colorBlendOp = VK_BLEND_OP_ADD;
+ blend_attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+ blend_attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+ blend_attachment.alphaBlendOp = VK_BLEND_OP_ADD;
+ blend_attachment.colorWriteMask =
+ VK_COLOR_COMPONENT_R_BIT |
+ VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT |
+ VK_COLOR_COMPONENT_A_BIT;
+
+ VkPipelineColorBlendStateCreateInfo blend = {0};
+ blend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ blend.attachmentCount = 1;
+ blend.pAttachments = &blend_attachment;
+
+ VkPipelineMultisampleStateCreateInfo multisample = {0};
+ multisample.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisample.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
+
+ VkPipelineViewportStateCreateInfo viewport = {0};
+ viewport.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewport.viewportCount = 1;
+ viewport.scissorCount = 1;
+
+ VkDynamicState dynStates[2] = {
+ VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR,
+ };
+ VkPipelineDynamicStateCreateInfo dynamic = {0};
+ dynamic.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ dynamic.pDynamicStates = dynStates;
+ dynamic.dynamicStateCount = 2;
+
+ VkPipelineVertexInputStateCreateInfo vertex = {0};
+ vertex.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+
+ VkGraphicsPipelineCreateInfo pinfo = {0};
+ pinfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ pinfo.layout = pipe_layout;
+ pinfo.renderPass = rp;
+ pinfo.subpass = 0;
+ pinfo.stageCount = 2;
+ pinfo.pStages = tex_stages;
+
+ pinfo.pInputAssemblyState = &assembly;
+ pinfo.pRasterizationState = &rasterization;
+ pinfo.pColorBlendState = &blend;
+ pinfo.pMultisampleState = &multisample;
+ pinfo.pViewportState = &viewport;
+ pinfo.pDynamicState = &dynamic;
+ pinfo.pVertexInputState = &vertex;
+
+	// NOTE: we could use a pipeline cache here for faster loading, stored
+	// somewhere like $XDG_CACHE_HOME/wlroots/vk_pipe_cache; a sketch
+	// follows this function
+ VkPipelineCache cache = VK_NULL_HANDLE;
+ res = vkCreateGraphicsPipelines(dev, cache, 1, &pinfo, NULL, pipe);
+ if (res != VK_SUCCESS) {
+		wlr_vk_error("failed to create vulkan tex pipeline", res);
+ return false;
+ }
+
+ return true;
+}
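+
+// A minimal sketch of the pipeline-cache idea from the NOTE above; the
+// helper name and the idea of loading initial data from disk are
+// assumptions, not part of this renderer. Passing NULL data creates an
+// empty cache.
+static VkPipelineCache create_pipe_cache_sketch(VkDevice dev,
+		const void *data, size_t size) {
+	VkPipelineCacheCreateInfo info = {0};
+	info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
+	info.initialDataSize = size;
+	info.pInitialData = data;
+
+	VkPipelineCache cache;
+	if (vkCreatePipelineCache(dev, &info, NULL, &cache) != VK_SUCCESS) {
+		return VK_NULL_HANDLE;
+	}
+	// the cache could then be passed to vkCreateGraphicsPipelines above
+	// and persisted at teardown via vkGetPipelineCacheData
+	return cache;
+}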
+
+// Creates static render data, such as sampler, layouts and shader modules
+// for the given renderer.
+// Cleanup is done by destroying the renderer.
+static bool init_static_render_data(struct wlr_vk_renderer *renderer) {
+ VkResult res;
+ VkDevice dev = renderer->dev->dev;
+
+ // default sampler (non ycbcr)
+ VkSamplerCreateInfo sampler_info = {0};
+ sampler_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ sampler_info.magFilter = VK_FILTER_LINEAR;
+ sampler_info.minFilter = VK_FILTER_LINEAR;
+ sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ sampler_info.maxAnisotropy = 1.f;
+ sampler_info.minLod = 0.f;
+ sampler_info.maxLod = 0.25f;
+ sampler_info.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
+
+ res = vkCreateSampler(dev, &sampler_info, NULL, &renderer->sampler);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("Failed to create sampler", res);
+ return false;
+ }
+
+ if (!init_tex_layouts(renderer, renderer->sampler,
+ &renderer->ds_layout, &renderer->pipe_layout)) {
+ return false;
+ }
+
+ // load vert module and tex frag module since they are needed to
+ // initialize the tex pipeline
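+	// common_vert_data, texture_frag_data and quad_frag_data are SPIR-V
+	// uint32_t arrays generated at build time by glslang (-V --vn, see
+	// render/vulkan/shaders/meson.build)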
+ VkShaderModuleCreateInfo sinfo = {0};
+ sinfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ sinfo.codeSize = sizeof(common_vert_data);
+ sinfo.pCode = common_vert_data;
+ res = vkCreateShaderModule(dev, &sinfo, NULL, &renderer->vert_module);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("Failed to create vertex shader module", res);
+ return false;
+ }
+
+ // tex frag
+ sinfo.codeSize = sizeof(texture_frag_data);
+ sinfo.pCode = texture_frag_data;
+ res = vkCreateShaderModule(dev, &sinfo, NULL, &renderer->tex_frag_module);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("Failed to create tex fragment shader module", res);
+ return false;
+ }
+
+ // quad frag
+ sinfo.codeSize = sizeof(quad_frag_data);
+ sinfo.pCode = quad_frag_data;
+ res = vkCreateShaderModule(dev, &sinfo, NULL, &renderer->quad_frag_module);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("Failed to create quad fragment shader module", res);
+ return false;
+ }
+
+ return true;
+}
+
+static struct wlr_vk_render_format_setup *find_or_create_render_setup(
+ struct wlr_vk_renderer *renderer, VkFormat format) {
+ struct wlr_vk_render_format_setup *setup;
+ wl_list_for_each(setup, &renderer->render_format_setups, link) {
+ if (setup->render_format == format) {
+ return setup;
+ }
+ }
+
+ setup = calloc(1u, sizeof(*setup));
+ if (!setup) {
+ wlr_log(WLR_ERROR, "Allocation failed");
+ return NULL;
+ }
+
+ setup->render_format = format;
+
+ // util
+ VkDevice dev = renderer->dev->dev;
+ VkResult res;
+
+ VkAttachmentDescription attachment = {0};
+ attachment.format = format;
+ attachment.samples = VK_SAMPLE_COUNT_1_BIT;
+ attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ attachment.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
+ attachment.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
+
+ VkAttachmentReference color_ref = {0};
+ color_ref.attachment = 0u;
+ color_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ VkSubpassDescription subpass = {0};
+ subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpass.colorAttachmentCount = 1;
+ subpass.pColorAttachments = &color_ref;
+
+ VkSubpassDependency deps[2] = {0};
+ deps[0].srcSubpass = VK_SUBPASS_EXTERNAL;
+ deps[0].srcStageMask = VK_PIPELINE_STAGE_HOST_BIT |
+ VK_PIPELINE_STAGE_TRANSFER_BIT |
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ deps[0].srcAccessMask = VK_ACCESS_HOST_WRITE_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ deps[0].dstSubpass = 0;
+ deps[0].dstStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
+ deps[0].dstAccessMask = VK_ACCESS_UNIFORM_READ_BIT |
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
+ VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
+ VK_ACCESS_SHADER_READ_BIT;
+
+ deps[1].srcSubpass = 0;
+ deps[1].srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ deps[1].srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ deps[1].dstSubpass = VK_SUBPASS_EXTERNAL;
+ deps[1].dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT |
+ VK_PIPELINE_STAGE_HOST_BIT | VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+ deps[1].dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT |
+ VK_ACCESS_MEMORY_READ_BIT;
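+
+	// deps[0] orders prior host writes and transfer uploads before any
+	// graphics reads in this pass; deps[1] makes attachment writes
+	// visible to later transfer and host reads (e.g. buffer readback)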
+
+ VkRenderPassCreateInfo rp_info = {0};
+ rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ rp_info.attachmentCount = 1;
+ rp_info.pAttachments = &attachment;
+ rp_info.subpassCount = 1;
+ rp_info.pSubpasses = &subpass;
+ rp_info.dependencyCount = 2u;
+ rp_info.pDependencies = deps;
+
+ res = vkCreateRenderPass(dev, &rp_info, NULL, &setup->render_pass);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("Failed to create render pass", res);
+ free(setup);
+ return NULL;
+ }
+
+ if (!init_tex_pipeline(renderer, setup->render_pass, renderer->pipe_layout,
+ &setup->tex_pipe)) {
+ goto error;
+ }
+
+ VkPipelineShaderStageCreateInfo vert_stage = {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ NULL, 0, VK_SHADER_STAGE_VERTEX_BIT, renderer->vert_module,
+ "main", NULL
+ };
+
+ VkPipelineShaderStageCreateInfo quad_stages[2] = {vert_stage, {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ NULL, 0, VK_SHADER_STAGE_FRAGMENT_BIT,
+ renderer->quad_frag_module, "main", NULL
+ }};
+
+ // info
+ VkPipelineInputAssemblyStateCreateInfo assembly = {0};
+ assembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ assembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
+
+ VkPipelineRasterizationStateCreateInfo rasterization = {0};
+ rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+ rasterization.polygonMode = VK_POLYGON_MODE_FILL;
+ rasterization.cullMode = VK_CULL_MODE_NONE;
+ rasterization.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
+ rasterization.lineWidth = 1.f;
+
+ VkPipelineColorBlendAttachmentState blend_attachment = {0};
+ blend_attachment.blendEnable = true;
+ blend_attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
+ blend_attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ blend_attachment.colorBlendOp = VK_BLEND_OP_ADD;
+ blend_attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+ blend_attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+ blend_attachment.alphaBlendOp = VK_BLEND_OP_ADD;
+ blend_attachment.colorWriteMask =
+ VK_COLOR_COMPONENT_R_BIT |
+ VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT |
+ VK_COLOR_COMPONENT_A_BIT;
+
+ VkPipelineColorBlendStateCreateInfo blend = {0};
+ blend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ blend.attachmentCount = 1;
+ blend.pAttachments = &blend_attachment;
+
+ VkPipelineMultisampleStateCreateInfo multisample = {0};
+ multisample.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisample.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
+
+ VkPipelineViewportStateCreateInfo viewport = {0};
+ viewport.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewport.viewportCount = 1;
+ viewport.scissorCount = 1;
+
+ VkDynamicState dynStates[2] = {
+ VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR,
+ };
+ VkPipelineDynamicStateCreateInfo dynamic = {0};
+ dynamic.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ dynamic.pDynamicStates = dynStates;
+ dynamic.dynamicStateCount = 2;
+
+ VkPipelineVertexInputStateCreateInfo vertex = {0};
+ vertex.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+
+ VkGraphicsPipelineCreateInfo pinfo = {0};
+ pinfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ pinfo.layout = renderer->pipe_layout;
+ pinfo.renderPass = setup->render_pass;
+ pinfo.subpass = 0;
+ pinfo.stageCount = 2;
+ pinfo.pStages = quad_stages;
+
+ pinfo.pInputAssemblyState = &assembly;
+ pinfo.pRasterizationState = &rasterization;
+ pinfo.pColorBlendState = &blend;
+ pinfo.pMultisampleState = &multisample;
+ pinfo.pViewportState = &viewport;
+ pinfo.pDynamicState = &dynamic;
+ pinfo.pVertexInputState = &vertex;
+
+	// NOTE: we could use a pipeline cache here for faster loading, stored
+	// somewhere like $XDG_CACHE_HOME/wlroots/vk_pipe_cache.bin
+ VkPipelineCache cache = VK_NULL_HANDLE;
+ res = vkCreateGraphicsPipelines(dev, cache, 1, &pinfo, NULL, &setup->quad_pipe);
+ if (res != VK_SUCCESS) {
+		wlr_vk_error("failed to create vulkan quad pipeline", res);
+ goto error;
+ }
+
+ wl_list_insert(&renderer->render_format_setups, &setup->link);
+ return setup;
+
+error:
+ destroy_render_format_setup(renderer, setup);
+ return NULL;
+}
+
+struct wlr_renderer *vulkan_renderer_create_for_device(struct wlr_vk_device *dev) {
+ struct wlr_vk_renderer *renderer;
+ VkResult res;
+ if (!(renderer = calloc(1, sizeof(*renderer)))) {
+ wlr_log_errno(WLR_ERROR, "failed to allocate wlr_vk_renderer");
+ return NULL;
+ }
+
+ renderer->dev = dev;
+ wlr_renderer_init(&renderer->wlr_renderer, &renderer_impl);
+ wl_list_init(&renderer->stage.buffers);
+ wl_list_init(&renderer->destroy_textures);
+ wl_list_init(&renderer->foreign_textures);
+ wl_list_init(&renderer->textures);
+ wl_list_init(&renderer->descriptor_pools);
+ wl_list_init(&renderer->render_format_setups);
+ wl_list_init(&renderer->render_buffers);
+
+ if (!init_static_render_data(renderer)) {
+ goto error;
+ }
+
+ // command pool
+ VkCommandPoolCreateInfo cpool_info = {0};
+ cpool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ cpool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+ cpool_info.queueFamilyIndex = dev->queue_family;
+ res = vkCreateCommandPool(dev->dev, &cpool_info, NULL,
+ &renderer->command_pool);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkCreateCommandPool", res);
+ goto error;
+ }
+
+ VkCommandBufferAllocateInfo cbai = {0};
+ cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ cbai.commandBufferCount = 1u;
+ cbai.commandPool = renderer->command_pool;
+ cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ res = vkAllocateCommandBuffers(dev->dev, &cbai, &renderer->cb);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkAllocateCommandBuffers", res);
+ goto error;
+ }
+
+ VkFenceCreateInfo fence_info = {0};
+ fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ res = vkCreateFence(dev->dev, &fence_info, NULL,
+ &renderer->fence);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkCreateFence", res);
+ goto error;
+ }
+
+ // staging command buffer
+ VkCommandBufferAllocateInfo cmd_buf_info = {0};
+ cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ cmd_buf_info.commandPool = renderer->command_pool;
+ cmd_buf_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ cmd_buf_info.commandBufferCount = 1u;
+ res = vkAllocateCommandBuffers(dev->dev, &cmd_buf_info,
+ &renderer->stage.cb);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkAllocateCommandBuffers", res);
+ goto error;
+ }
+
+ return &renderer->wlr_renderer;
+
+error:
+ vulkan_destroy(&renderer->wlr_renderer);
+ return NULL;
+}
+
+struct wlr_renderer *wlr_vk_renderer_create_with_drm_fd(int drm_fd) {
+ wlr_log(WLR_INFO, "The vulkan renderer is only experimental and "
+ "not expected to be ready for daily use");
+
+	// NOTE: we could add functionality to allow the compositor to pass its
+	// name and version to this function. Just use dummies until then;
+	// this shouldn't be relevant to the driver anyway.
+ struct wlr_vk_instance *ini = vulkan_instance_create(0, NULL, default_debug);
+ if (!ini) {
+ wlr_log(WLR_ERROR, "creating vulkan instance for renderer failed");
+ return NULL;
+ }
+
+ VkPhysicalDevice phdev = vulkan_find_drm_phdev(ini, drm_fd);
+	if (!phdev) {
+		// We'd rather fail here than do guesswork
+		wlr_log(WLR_ERROR, "Could not match drm and vulkan device");
+		vulkan_instance_destroy(ini);
+		return NULL;
+	}
+
+ // queue families
+ uint32_t qfam_count;
+ vkGetPhysicalDeviceQueueFamilyProperties(phdev, &qfam_count, NULL);
+ VkQueueFamilyProperties queue_props[qfam_count];
+ vkGetPhysicalDeviceQueueFamilyProperties(phdev, &qfam_count,
+ queue_props);
+
+ struct wlr_vk_device *dev = vulkan_device_create(ini, phdev, 0, NULL);
+ if (!dev) {
+ wlr_log(WLR_ERROR, "Failed to create vulkan device");
+ vulkan_instance_destroy(ini);
+ return NULL;
+ }
+
+ // We duplicate it so it's not closed while we still need it.
+ dev->drm_fd = fcntl(drm_fd, F_DUPFD_CLOEXEC, 0);
+ if (dev->drm_fd < 0) {
+ wlr_log_errno(WLR_ERROR, "fcntl(F_DUPFD_CLOEXEC) failed");
+ vulkan_device_destroy(dev);
+ vulkan_instance_destroy(ini);
+ return NULL;
+ }
+
+ return vulkan_renderer_create_for_device(dev);
+}
diff --git a/render/vulkan/shaders/common.vert b/render/vulkan/shaders/common.vert
new file mode 100644
index 00000000..fa31d26c
--- /dev/null
+++ b/render/vulkan/shaders/common.vert
@@ -0,0 +1,25 @@
+#version 450
+
+// we use a mat4 since it occupies the same size as a mat3 due to
+// alignment; a tightly-packed mat4 is easier to deal with though.
+layout(push_constant, row_major) uniform UBO {
+ mat4 proj;
+ vec2 uv_offset;
+ vec2 uv_size;
+} data;
+
+layout(location = 0) out vec2 uv;
+
+// the 4 corner points of the quad, doubling as uv coords
+const vec2[] values = {
+ {0, 0},
+ {1, 0},
+ {1, 1},
+ {0, 1},
+};
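+
+// together with VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN and
+// vkCmdDraw(cb, 4, 1, 0, 0) on the renderer side, these corners span the
+// unit quad; uv_offset/uv_size then select the sampled sub-rectangle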
+
+void main() {
+ vec2 pos = values[gl_VertexIndex % 4];
+ uv = data.uv_offset + pos * data.uv_size;
+ gl_Position = data.proj * vec4(pos, 0.0, 1.0);
+}
diff --git a/render/vulkan/shaders/meson.build b/render/vulkan/shaders/meson.build
new file mode 100644
index 00000000..b183c46c
--- /dev/null
+++ b/render/vulkan/shaders/meson.build
@@ -0,0 +1,20 @@
+vulkan_shaders_src = [
+ 'common.vert',
+ 'texture.frag',
+ 'quad.frag',
+]
+
+vulkan_shaders = []
+foreach shader : vulkan_shaders_src
+ name = shader.underscorify() + '_data'
+ args = [glslang, '-V', '@INPUT@', '-o', '@OUTPUT@', '--vn', name]
+ header = custom_target(
+ shader + '_spv',
+ output: shader + '.h',
+ input: shader,
+ command: args)
+
+ vulkan_shaders += [header]
+endforeach
+
+wlr_files += vulkan_shaders
diff --git a/render/vulkan/shaders/quad.frag b/render/vulkan/shaders/quad.frag
new file mode 100644
index 00000000..affd1f11
--- /dev/null
+++ b/render/vulkan/shaders/quad.frag
@@ -0,0 +1,10 @@
+#version 450
+
+layout(location = 0) out vec4 out_color;
+layout(push_constant) uniform UBO {
+ layout(offset = 80) vec4 color;
+} data;
+
+void main() {
+ out_color = data.color;
+}
diff --git a/render/vulkan/shaders/texture.frag b/render/vulkan/shaders/texture.frag
new file mode 100644
index 00000000..7a7b8c57
--- /dev/null
+++ b/render/vulkan/shaders/texture.frag
@@ -0,0 +1,25 @@
+#version 450
+
+layout(set = 0, binding = 0) uniform sampler2D tex;
+
+layout(location = 0) in vec2 uv;
+layout(location = 0) out vec4 out_color;
+
+layout(push_constant) uniform UBO {
+ layout(offset = 80) float alpha;
+} data;
+
+void main() {
+ out_color = textureLod(tex, uv, 0);
+
+ // We expect this shader to output pre-alpha-multiplied color values.
+ // alpha < 0.0 means that this shader should ignore the texture's alpha
+ // value.
+ if (data.alpha < 0.0) {
+ out_color.a = -data.alpha;
+ out_color.rgb *= -data.alpha;
+ } else {
+ out_color *= data.alpha;
+ }
+}
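+
+// worked example: an XRGB texture drawn with alpha 0.8 arrives here as
+// data.alpha == -0.8, yielding out_color = vec4(rgb * 0.8, 0.8), i.e. a
+// correctly pre-multiplied, fully opaque texel scaled by 0.8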
+
diff --git a/render/vulkan/texture.c b/render/vulkan/texture.c
new file mode 100644
index 00000000..b4ba67ed
--- /dev/null
+++ b/render/vulkan/texture.c
@@ -0,0 +1,718 @@
+#define _POSIX_C_SOURCE 200809L
+#include <assert.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <wlr/render/wlr_texture.h>
+#include <wlr/util/log.h>
+#include "render/pixel_format.h"
+#include "render/vulkan.h"
+
+static const struct wlr_texture_impl texture_impl;
+
+struct wlr_vk_texture *vulkan_get_texture(struct wlr_texture *wlr_texture) {
+ assert(wlr_texture->impl == &texture_impl);
+ return (struct wlr_vk_texture *)wlr_texture;
+}
+
+static VkImageAspectFlagBits mem_plane_aspect(unsigned i) {
+ switch (i) {
+ case 0: return VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT;
+ case 1: return VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT;
+ case 2: return VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT;
+ case 3: return VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT;
+ default: abort(); // unreachable
+ }
+}
+
+static bool vulkan_texture_is_opaque(struct wlr_texture *wlr_texture) {
+ struct wlr_vk_texture *texture = vulkan_get_texture(wlr_texture);
+ const struct wlr_pixel_format_info *format_info = drm_get_pixel_format_info(
+ texture->format->drm_format);
+ assert(format_info);
+ return !format_info->has_alpha;
+}
+
+// Transitions the texture to SHADER_READ_ONLY_OPTIMAL layout so it can be
+// read from the fragment shader later on.
+static bool write_pixels(struct wlr_texture *wlr_texture,
+ uint32_t stride, uint32_t width, uint32_t height, uint32_t src_x,
+ uint32_t src_y, uint32_t dst_x, uint32_t dst_y, const void *vdata,
+ VkImageLayout old_layout, VkPipelineStageFlags src_stage,
+ VkAccessFlags src_access) {
+ VkResult res;
+ struct wlr_vk_texture *texture = vulkan_get_texture(wlr_texture);
+ struct wlr_vk_renderer *renderer = texture->renderer;
+ VkDevice dev = texture->renderer->dev->dev;
+
+ // make sure assumptions are met
+ assert(src_x + width <= texture->wlr_texture.width);
+ assert(src_y + height <= texture->wlr_texture.height);
+ assert(dst_x + width <= texture->wlr_texture.width);
+ assert(dst_y + height <= texture->wlr_texture.height);
+
+ const struct wlr_pixel_format_info *format_info = drm_get_pixel_format_info(
+ texture->format->drm_format);
+ assert(format_info);
+
+ // deferred upload by transfer; using staging buffer
+	// calculate maximum size needed
+ uint32_t bsize = 0;
+ unsigned bytespb = format_info->bpp / 8;
+ bsize += height * bytespb * width;
+
+ // get staging buffer
+ struct wlr_vk_buffer_span span = vulkan_get_stage_span(renderer, bsize);
+ if (!span.buffer || span.alloc.size != bsize) {
+ wlr_log(WLR_ERROR, "Failed to retrieve staging buffer");
+ return false;
+ }
+
+ void *vmap;
+ res = vkMapMemory(dev, span.buffer->memory, span.alloc.start,
+ bsize, 0, &vmap);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkMapMemory", res);
+ return false;
+ }
+ char *map = (char *)vmap;
+
+ // record staging cb
+ // will be executed before next frame
+ VkCommandBuffer cb = vulkan_record_stage_cb(renderer);
+ vulkan_change_layout(cb, texture->image,
+ old_layout, src_stage, src_access,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_ACCESS_TRANSFER_WRITE_BIT);
+
+ // upload data
+ const char *pdata = vdata; // data iterator
+
+ uint32_t packed_stride = bytespb * width;
+ uint32_t buf_off = span.alloc.start + (map - (char *)vmap);
+
+ // write data into staging buffer span
+ pdata += stride * src_y;
+ pdata += bytespb * src_x;
+ if (src_x == 0 && width == texture->wlr_texture.width &&
+ stride == packed_stride) {
+ memcpy(map, pdata, packed_stride * height);
+ map += packed_stride * height;
+ } else {
+ for (unsigned i = 0u; i < height; ++i) {
+ memcpy(map, pdata, packed_stride);
+ pdata += stride;
+ map += packed_stride;
+ }
+ }
+
+ VkBufferImageCopy copy;
+ copy.imageExtent.width = width;
+ copy.imageExtent.height = height;
+ copy.imageExtent.depth = 1;
+ copy.imageOffset.x = dst_x;
+ copy.imageOffset.y = dst_y;
+ copy.imageOffset.z = 0;
+ copy.bufferOffset = buf_off;
+ copy.bufferRowLength = width;
+ copy.bufferImageHeight = height;
+ copy.imageSubresource.mipLevel = 0;
+ copy.imageSubresource.baseArrayLayer = 0;
+ copy.imageSubresource.layerCount = 1;
+ copy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+
+ assert((uint32_t)(map - (char *)vmap) == bsize);
+ vkUnmapMemory(dev, span.buffer->memory);
+
+ vkCmdCopyBufferToImage(cb, span.buffer->buffer, texture->image,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy);
+ vulkan_change_layout(cb, texture->image,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_ACCESS_SHADER_READ_BIT);
+ texture->last_used = renderer->frame;
+
+ return true;
+}
+
+static bool vulkan_texture_write_pixels(struct wlr_texture *wlr_texture,
+ uint32_t stride, uint32_t width, uint32_t height, uint32_t src_x,
+ uint32_t src_y, uint32_t dst_x, uint32_t dst_y, const void *vdata) {
+ return write_pixels(wlr_texture, stride, width, height, src_x, src_y,
+ dst_x, dst_y, vdata, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
+}
+
+void vulkan_texture_destroy(struct wlr_vk_texture *texture) {
+ if (!texture->renderer) {
+ free(texture);
+ return;
+ }
+
+	// when we recorded a command to fill this image _this_ frame,
+	// it has to be executed before the texture can be destroyed.
+	// Add it to the renderer->destroy_textures list, destroying it
+	// _after_ the stage command buffer has executed
+ if (texture->last_used == texture->renderer->frame) {
+ assert(texture->destroy_link.next == NULL); // not already inserted
+ wl_list_insert(&texture->renderer->destroy_textures,
+ &texture->destroy_link);
+ return;
+ }
+
+ wl_list_remove(&texture->link);
+ wl_list_remove(&texture->buffer_destroy.link);
+
+ VkDevice dev = texture->renderer->dev->dev;
+ if (texture->ds && texture->ds_pool) {
+ vulkan_free_ds(texture->renderer, texture->ds_pool, texture->ds);
+ }
+
+ vkDestroyImageView(dev, texture->image_view, NULL);
+ vkDestroyImage(dev, texture->image, NULL);
+
+ for (unsigned i = 0u; i < texture->mem_count; ++i) {
+ vkFreeMemory(dev, texture->memories[i], NULL);
+ }
+
+ free(texture);
+}
+
+static void vulkan_texture_unref(struct wlr_texture *wlr_texture) {
+ struct wlr_vk_texture *texture = vulkan_get_texture(wlr_texture);
+ if (texture->buffer != NULL) {
+ // Keep the texture around, in case the buffer is re-used later. We're
+ // still listening to the buffer's destroy event.
+ wlr_buffer_unlock(texture->buffer);
+ } else {
+ vulkan_texture_destroy(texture);
+ }
+}
+
+static const struct wlr_texture_impl texture_impl = {
+ .is_opaque = vulkan_texture_is_opaque,
+ .write_pixels = vulkan_texture_write_pixels,
+ .destroy = vulkan_texture_unref,
+};
+
+static struct wlr_vk_texture *vulkan_texture_create(
+ struct wlr_vk_renderer *renderer, uint32_t width, uint32_t height) {
+ struct wlr_vk_texture *texture =
+ calloc(1, sizeof(struct wlr_vk_texture));
+ if (texture == NULL) {
+ wlr_log_errno(WLR_ERROR, "Allocation failed");
+ return NULL;
+ }
+ wlr_texture_init(&texture->wlr_texture, &texture_impl, width, height);
+ texture->renderer = renderer;
+ wl_list_insert(&renderer->textures, &texture->link);
+ wl_list_init(&texture->buffer_destroy.link);
+ return texture;
+}
+
+static struct wlr_texture *vulkan_texture_from_pixels(struct wlr_renderer *wlr_renderer,
+ uint32_t drm_fmt, uint32_t stride, uint32_t width,
+ uint32_t height, const void *data) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+
+ VkResult res;
+ VkDevice dev = renderer->dev->dev;
+
+ wlr_log(WLR_DEBUG, "vulkan_texture_from_pixels: %.4s, %dx%d",
+ (const char*) &drm_fmt, width, height);
+
+ const struct wlr_vk_format_props *fmt =
+ vulkan_format_props_from_drm(renderer->dev, drm_fmt);
+ if (fmt == NULL) {
+ wlr_log(WLR_ERROR, "Unsupported pixel format %"PRIx32 " (%.4s)",
+ drm_fmt, (const char*) &drm_fmt);
+ return NULL;
+ }
+
+ struct wlr_vk_texture *texture = vulkan_texture_create(renderer, width, height);
+ if (texture == NULL) {
+ return NULL;
+ }
+
+ texture->format = &fmt->format;
+
+	// create image
+
+ VkImageCreateInfo img_info = {0};
+ img_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ img_info.imageType = VK_IMAGE_TYPE_2D;
+ img_info.format = texture->format->vk_format;
+ img_info.mipLevels = 1;
+ img_info.arrayLayers = 1;
+ img_info.samples = VK_SAMPLE_COUNT_1_BIT;
+ img_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ img_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ img_info.extent = (VkExtent3D) { width, height, 1 };
+ img_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
+
+ img_info.tiling = VK_IMAGE_TILING_OPTIMAL;
+ img_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+	VkImageLayout layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+	res = vkCreateImage(dev, &img_info, NULL, &texture->image);
+	if (res != VK_SUCCESS) {
+		wlr_vk_error("vkCreateImage failed", res);
+		goto error;
+	}
+
+	// memory
+	VkMemoryRequirements mem_reqs;
+	vkGetImageMemoryRequirements(dev, texture->image, &mem_reqs);
+
+	// mem_reqs.memoryTypeBits is a bitmask of usable memory type indices;
+	// vulkan_find_mem_type() turns it into one concrete, device-local index
+	int mem_type_index = vulkan_find_mem_type(renderer->dev,
+		VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, mem_reqs.memoryTypeBits);
+	if (mem_type_index < 0) {
+		wlr_log(WLR_ERROR, "failed to find suitable memory type");
+		goto error;
+	}
+
+	VkMemoryAllocateInfo mem_info = {0};
+	mem_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+	mem_info.allocationSize = mem_reqs.size;
+	mem_info.memoryTypeIndex = mem_type_index;
+ res = vkAllocateMemory(dev, &mem_info, NULL, &texture->memories[0]);
+ if (res != VK_SUCCESS) {
+		wlr_vk_error("vkAllocateMemory failed", res);
+ goto error;
+ }
+
+ texture->mem_count = 1;
+ res = vkBindImageMemory(dev, texture->image, texture->memories[0], 0);
+ if (res != VK_SUCCESS) {
+		wlr_vk_error("vkBindImageMemory failed", res);
+ goto error;
+ }
+
+ // view
+ VkImageViewCreateInfo view_info = {0};
+ view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ view_info.format = texture->format->vk_format;
+ view_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
+
+ view_info.subresourceRange = (VkImageSubresourceRange) {
+ VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1
+ };
+ view_info.image = texture->image;
+
+ res = vkCreateImageView(dev, &view_info, NULL,
+ &texture->image_view);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkCreateImageView failed", res);
+ goto error;
+ }
+
+ // descriptor
+ texture->ds_pool = vulkan_alloc_texture_ds(renderer, &texture->ds);
+ if (!texture->ds_pool) {
+ wlr_log(WLR_ERROR, "failed to allocate descriptor");
+ goto error;
+ }
+
+ VkDescriptorImageInfo ds_img_info = {0};
+ ds_img_info.imageView = texture->image_view;
+ ds_img_info.imageLayout = layout;
+
+ VkWriteDescriptorSet ds_write = {0};
+ ds_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ ds_write.descriptorCount = 1;
+ ds_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ ds_write.dstSet = texture->ds;
+ ds_write.pImageInfo = &ds_img_info;
+
+ vkUpdateDescriptorSets(dev, 1, &ds_write, 0, NULL);
+
+ // write data
+ if (!write_pixels(&texture->wlr_texture, stride,
+ width, height, 0, 0, 0, 0, data, VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0)) {
+ goto error;
+ }
+
+ return &texture->wlr_texture;
+
+error:
+ vulkan_texture_destroy(texture);
+ return NULL;
+}
+
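+// A dmabuf is disjoint when its planes are backed by different underlying
+// buffers. Planes sharing one buffer object (same inode, possibly
+// different offsets) are the common multi-planar case and are not
+// disjoint.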
+static bool is_dmabuf_disjoint(const struct wlr_dmabuf_attributes *attribs) {
+ if (attribs->n_planes == 1) {
+ return false;
+ }
+
+ struct stat first_stat;
+ if (fstat(attribs->fd[0], &first_stat) != 0) {
+ wlr_log_errno(WLR_ERROR, "fstat failed");
+ return true;
+ }
+
+ for (int i = 1; i < attribs->n_planes; i++) {
+ struct stat plane_stat;
+ if (fstat(attribs->fd[i], &plane_stat) != 0) {
+ wlr_log_errno(WLR_ERROR, "fstat failed");
+ return true;
+ }
+
+ if (first_stat.st_ino != plane_stat.st_ino) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+VkImage vulkan_import_dmabuf(struct wlr_vk_renderer *renderer,
+ const struct wlr_dmabuf_attributes *attribs,
+ VkDeviceMemory mems[static WLR_DMABUF_MAX_PLANES], uint32_t *n_mems,
+ bool for_render) {
+ VkResult res;
+ VkDevice dev = renderer->dev->dev;
+ *n_mems = 0u;
+
+ wlr_log(WLR_DEBUG, "vulkan_import_dmabuf: %.4s (mod %"PRIx64"), %dx%d, %d planes",
+ (const char *)&attribs->format, attribs->modifier,
+ attribs->width, attribs->height, attribs->n_planes);
+
+ struct wlr_vk_format_props *fmt = vulkan_format_props_from_drm(renderer->dev,
+ attribs->format);
+ if (fmt == NULL) {
+ wlr_log(WLR_ERROR, "Unsupported pixel format %"PRIx32 " (%.4s)",
+ attribs->format, (const char*) &attribs->format);
+ return VK_NULL_HANDLE;
+ }
+
+ uint32_t plane_count = attribs->n_planes;
+	assert(plane_count <= WLR_DMABUF_MAX_PLANES);
+ struct wlr_vk_format_modifier_props *mod =
+ vulkan_format_props_find_modifier(fmt, attribs->modifier, for_render);
+ if (!mod || !(mod->dmabuf_flags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) {
+ wlr_log(WLR_ERROR, "Format %"PRIx32" (%.4s) can't be used with modifier "
+ "%"PRIx64, attribs->format, (const char*) &attribs->format,
+ attribs->modifier);
+ return VK_NULL_HANDLE;
+ }
+
+ if ((uint32_t) attribs->width > mod->max_extent.width ||
+ (uint32_t) attribs->height > mod->max_extent.height) {
+ wlr_log(WLR_ERROR, "dmabuf is too large to import");
+ return VK_NULL_HANDLE;
+ }
+
+ if (mod->props.drmFormatModifierPlaneCount != plane_count) {
+ wlr_log(WLR_ERROR, "Number of planes (%d) does not match format (%d)",
+ plane_count, mod->props.drmFormatModifierPlaneCount);
+ return VK_NULL_HANDLE;
+ }
+
+ // check if we have to create the image disjoint
+ bool disjoint = is_dmabuf_disjoint(attribs);
+ if (disjoint && !(mod->props.drmFormatModifierTilingFeatures
+ & VK_FORMAT_FEATURE_DISJOINT_BIT)) {
+ wlr_log(WLR_ERROR, "Format/Modifier does not support disjoint images");
+ return VK_NULL_HANDLE;
+ }
+
+ // image
+ VkExternalMemoryHandleTypeFlagBits htype =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+
+ VkImageCreateInfo img_info = {0};
+ img_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ img_info.imageType = VK_IMAGE_TYPE_2D;
+ img_info.format = fmt->format.vk_format;
+ img_info.mipLevels = 1;
+ img_info.arrayLayers = 1;
+ img_info.samples = VK_SAMPLE_COUNT_1_BIT;
+ img_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ img_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
+ img_info.extent = (VkExtent3D) { attribs->width, attribs->height, 1 };
+ img_info.usage = for_render ?
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT :
+ VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (disjoint) {
+ img_info.flags = VK_IMAGE_CREATE_DISJOINT_BIT;
+ }
+
+ VkExternalMemoryImageCreateInfo eimg = {0};
+ eimg.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+ eimg.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+ img_info.pNext = &eimg;
+
+ VkSubresourceLayout plane_layouts[WLR_DMABUF_MAX_PLANES] = {0};
+ VkImageDrmFormatModifierExplicitCreateInfoEXT mod_info = {0};
+
+ img_info.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+ for (unsigned i = 0u; i < plane_count; ++i) {
+ plane_layouts[i].offset = attribs->offset[i];
+ plane_layouts[i].rowPitch = attribs->stride[i];
+ plane_layouts[i].size = 0;
+ }
+
+ mod_info.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT;
+ mod_info.drmFormatModifierPlaneCount = plane_count;
+ mod_info.drmFormatModifier = mod->props.drmFormatModifier;
+ mod_info.pPlaneLayouts = plane_layouts;
+ eimg.pNext = &mod_info;
+
+ VkImage image;
+ res = vkCreateImage(dev, &img_info, NULL, &image);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkCreateImage", res);
+ return VK_NULL_HANDLE;
+ }
+
+ unsigned mem_count = disjoint ? plane_count : 1u;
+ VkBindImageMemoryInfo bindi[WLR_DMABUF_MAX_PLANES] = {0};
+ VkBindImagePlaneMemoryInfo planei[WLR_DMABUF_MAX_PLANES] = {0};
+
+ for (unsigned i = 0u; i < mem_count; ++i) {
+ struct VkMemoryFdPropertiesKHR fdp = {0};
+ fdp.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR;
+ res = renderer->dev->api.getMemoryFdPropertiesKHR(dev, htype,
+ attribs->fd[i], &fdp);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("getMemoryFdPropertiesKHR", res);
+ goto error_image;
+ }
+
+ VkImageMemoryRequirementsInfo2 memri = {0};
+ memri.image = image;
+ memri.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
+
+ VkImagePlaneMemoryRequirementsInfo planeri = {0};
+ if (disjoint) {
+ planeri.sType = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO;
+ planeri.planeAspect = mem_plane_aspect(i);
+ memri.pNext = &planeri;
+ }
+
+ VkMemoryRequirements2 memr = {0};
+ memr.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
+
+ vkGetImageMemoryRequirements2(dev, &memri, &memr);
+ int mem = vulkan_find_mem_type(renderer->dev, 0,
+ memr.memoryRequirements.memoryTypeBits & fdp.memoryTypeBits);
+ if (mem < 0) {
+ wlr_log(WLR_ERROR, "no valid memory type index");
+ goto error_image;
+ }
+
+		// Importing transfers ownership of the FD to vulkan, but this
+		// operation does not transfer ownership of the attribs to this
+		// texture, so we duplicate the FD first. Vulkan will close the
+		// duplicate on vkFreeMemory.
+ int dfd = fcntl(attribs->fd[i], F_DUPFD_CLOEXEC, 0);
+ if (dfd < 0) {
+ wlr_log_errno(WLR_ERROR, "fcntl(F_DUPFD_CLOEXEC) failed");
+ goto error_image;
+ }
+
+ VkMemoryAllocateInfo memi = {0};
+ memi.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ memi.allocationSize = memr.memoryRequirements.size;
+ memi.memoryTypeIndex = mem;
+
+ VkImportMemoryFdInfoKHR importi = {0};
+ importi.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
+ importi.fd = dfd;
+ importi.handleType = htype;
+ memi.pNext = &importi;
+
+ VkMemoryDedicatedAllocateInfo dedi = {0};
+ dedi.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
+ dedi.image = image;
+ importi.pNext = &dedi;
+
+ res = vkAllocateMemory(dev, &memi, NULL, &mems[i]);
+ if (res != VK_SUCCESS) {
+ close(dfd);
+ wlr_vk_error("vkAllocateMemory failed", res);
+ goto error_image;
+ }
+
+ ++(*n_mems);
+
+ // fill bind info
+ bindi[i].image = image;
+ bindi[i].memory = mems[i];
+ bindi[i].memoryOffset = 0;
+ bindi[i].sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
+
+ if (disjoint) {
+ planei[i].sType = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO;
+ planei[i].planeAspect = planeri.planeAspect;
+ bindi[i].pNext = &planei[i];
+ }
+ }
+
+ res = vkBindImageMemory2(dev, mem_count, bindi);
+ if (res != VK_SUCCESS) {
+		wlr_vk_error("vkBindImageMemory2 failed", res);
+ goto error_image;
+ }
+
+ return image;
+
+error_image:
+ vkDestroyImage(dev, image, NULL);
+ for (size_t i = 0u; i < *n_mems; ++i) {
+ vkFreeMemory(dev, mems[i], NULL);
+ mems[i] = VK_NULL_HANDLE;
+ }
+
+ return VK_NULL_HANDLE;
+}
+
+static struct wlr_texture *vulkan_texture_from_dmabuf(struct wlr_renderer *wlr_renderer,
+ struct wlr_dmabuf_attributes *attribs) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+
+ VkResult res;
+ VkDevice dev = renderer->dev->dev;
+
+ const struct wlr_vk_format_props *fmt = vulkan_format_props_from_drm(
+ renderer->dev, attribs->format);
+ if (fmt == NULL) {
+ wlr_log(WLR_ERROR, "Unsupported pixel format %"PRIx32 " (%.4s)",
+ attribs->format, (const char*) &attribs->format);
+ return NULL;
+ }
+
+ struct wlr_vk_texture *texture = vulkan_texture_create(renderer,
+ attribs->width, attribs->height);
+ if (texture == NULL) {
+ return NULL;
+ }
+
+ texture->format = &fmt->format;
+ texture->image = vulkan_import_dmabuf(renderer, attribs,
+ texture->memories, &texture->mem_count, false);
+ if (!texture->image) {
+ goto error;
+ }
+
+ uint32_t flags = attribs->flags;
+ if (flags & WLR_DMABUF_ATTRIBUTES_FLAGS_Y_INVERT) {
+ texture->invert_y = true;
+ flags &= ~WLR_DMABUF_ATTRIBUTES_FLAGS_Y_INVERT;
+ }
+
+	if (flags != 0) {
+		wlr_log(WLR_ERROR, "dmabuf flags %x not supported/implemented on vulkan",
+			flags);
+		// NOTE: we should probably make this a hard error in the future
+		// return NULL;
+	}
+
+ // view
+ VkImageViewCreateInfo view_info = {0};
+ view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ view_info.format = texture->format->vk_format;
+ view_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
+
+ view_info.subresourceRange = (VkImageSubresourceRange) {
+ VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1
+ };
+ view_info.image = texture->image;
+
+ res = vkCreateImageView(dev, &view_info, NULL, &texture->image_view);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("vkCreateImageView failed", res);
+ goto error;
+ }
+
+ // descriptor
+ texture->ds_pool = vulkan_alloc_texture_ds(renderer, &texture->ds);
+ if (!texture->ds_pool) {
+ wlr_log(WLR_ERROR, "failed to allocate descriptor");
+ goto error;
+ }
+
+ VkDescriptorImageInfo ds_img_info = {0};
+ ds_img_info.imageView = texture->image_view;
+ ds_img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ VkWriteDescriptorSet ds_write = {0};
+ ds_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ ds_write.descriptorCount = 1;
+ ds_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ ds_write.dstSet = texture->ds;
+ ds_write.pImageInfo = &ds_img_info;
+
+ vkUpdateDescriptorSets(dev, 1, &ds_write, 0, NULL);
+ texture->dmabuf_imported = true;
+
+ return &texture->wlr_texture;
+
+error:
+ vulkan_texture_destroy(texture);
+ return NULL;
+}
+
+static void texture_handle_buffer_destroy(struct wl_listener *listener,
+ void *data) {
+ struct wlr_vk_texture *texture =
+ wl_container_of(listener, texture, buffer_destroy);
+ vulkan_texture_destroy(texture);
+}
+
+static struct wlr_texture *vulkan_texture_from_dmabuf_buffer(
+ struct wlr_vk_renderer *renderer, struct wlr_buffer *buffer,
+ struct wlr_dmabuf_attributes *dmabuf) {
+ struct wlr_vk_texture *texture;
+ wl_list_for_each(texture, &renderer->textures, link) {
+ if (texture->buffer == buffer) {
+ wlr_buffer_lock(texture->buffer);
+ return &texture->wlr_texture;
+ }
+ }
+
+ struct wlr_texture *wlr_texture =
+ vulkan_texture_from_dmabuf(&renderer->wlr_renderer, dmabuf);
+ if (wlr_texture == NULL) {
+		return NULL;
+ }
+
+ texture = vulkan_get_texture(wlr_texture);
+ texture->buffer = wlr_buffer_lock(buffer);
+
+ texture->buffer_destroy.notify = texture_handle_buffer_destroy;
+ wl_signal_add(&buffer->events.destroy, &texture->buffer_destroy);
+
+ return &texture->wlr_texture;
+}
+
+struct wlr_texture *vulkan_texture_from_buffer(
+ struct wlr_renderer *wlr_renderer,
+ struct wlr_buffer *buffer) {
+ struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+
+ void *data;
+ uint32_t format;
+ size_t stride;
+ struct wlr_dmabuf_attributes dmabuf;
+ if (wlr_buffer_get_dmabuf(buffer, &dmabuf)) {
+ return vulkan_texture_from_dmabuf_buffer(renderer, buffer, &dmabuf);
+ } else if (wlr_buffer_begin_data_ptr_access(buffer,
+ WLR_BUFFER_DATA_PTR_ACCESS_READ, &data, &format, &stride)) {
+ struct wlr_texture *tex = vulkan_texture_from_pixels(wlr_renderer,
+ format, stride, buffer->width, buffer->height, data);
+ wlr_buffer_end_data_ptr_access(buffer);
+ return tex;
+ } else {
+ return NULL;
+ }
+}
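+
+// usage sketch: compositors normally reach this entry point through the
+// generic wlr_texture_from_buffer() wrapper (dispatching via the
+// texture_from_buffer hook registered in renderer_impl) rather than
+// calling it directly:
+//
+//	struct wlr_texture *tex = wlr_texture_from_buffer(renderer, buffer);
+//	if (tex == NULL) {
+//		wlr_log(WLR_ERROR, "Failed to import client buffer");
+//	}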
diff --git a/render/vulkan/util.c b/render/vulkan/util.c
new file mode 100644
index 00000000..b15c57eb
--- /dev/null
+++ b/render/vulkan/util.c
@@ -0,0 +1,93 @@
+#include <vulkan/vulkan.h>
+#include <wlr/util/log.h>
+#include "render/vulkan.h"
+
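+// Returns the index of the first memory type contained in the req_bits
+// bitmask that has all the given property flags set, or -1 if none matches.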
+int vulkan_find_mem_type(struct wlr_vk_device *dev,
+ VkMemoryPropertyFlags flags, uint32_t req_bits) {
+
+ VkPhysicalDeviceMemoryProperties props;
+ vkGetPhysicalDeviceMemoryProperties(dev->phdev, &props);
+
+ for (unsigned i = 0u; i < props.memoryTypeCount; ++i) {
+ if (req_bits & (1 << i)) {
+ if ((props.memoryTypes[i].propertyFlags & flags) == flags) {
+ return i;
+ }
+ }
+ }
+
+ return -1;
+}
+
+const char *vulkan_strerror(VkResult err) {
+ #define ERR_STR(r) case VK_ ##r: return #r
+ switch (err) {
+ ERR_STR(SUCCESS);
+ ERR_STR(NOT_READY);
+ ERR_STR(TIMEOUT);
+ ERR_STR(EVENT_SET);
+ ERR_STR(EVENT_RESET);
+ ERR_STR(INCOMPLETE);
+ ERR_STR(SUBOPTIMAL_KHR);
+ ERR_STR(ERROR_OUT_OF_HOST_MEMORY);
+ ERR_STR(ERROR_OUT_OF_DEVICE_MEMORY);
+ ERR_STR(ERROR_INITIALIZATION_FAILED);
+ ERR_STR(ERROR_DEVICE_LOST);
+ ERR_STR(ERROR_MEMORY_MAP_FAILED);
+ ERR_STR(ERROR_LAYER_NOT_PRESENT);
+ ERR_STR(ERROR_EXTENSION_NOT_PRESENT);
+ ERR_STR(ERROR_FEATURE_NOT_PRESENT);
+ ERR_STR(ERROR_INCOMPATIBLE_DRIVER);
+ ERR_STR(ERROR_TOO_MANY_OBJECTS);
+ ERR_STR(ERROR_FORMAT_NOT_SUPPORTED);
+ ERR_STR(ERROR_SURFACE_LOST_KHR);
+ ERR_STR(ERROR_NATIVE_WINDOW_IN_USE_KHR);
+ ERR_STR(ERROR_OUT_OF_DATE_KHR);
+ ERR_STR(ERROR_FRAGMENTED_POOL);
+ ERR_STR(ERROR_INCOMPATIBLE_DISPLAY_KHR);
+ ERR_STR(ERROR_VALIDATION_FAILED_EXT);
+ ERR_STR(ERROR_INVALID_EXTERNAL_HANDLE);
+ ERR_STR(ERROR_OUT_OF_POOL_MEMORY);
+ ERR_STR(ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT);
+ default:
+ return "<unknown>";
+ }
+ #undef ERR_STR
+}
+
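+// Records an image memory barrier on cb, transitioning img from layout ol
+// to nl, synchronizing the srcs/srca stages and accesses against
+// dsts/dsta, and optionally transferring queue family ownership from
+// src_family to dst_family.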
+void vulkan_change_layout_queue(VkCommandBuffer cb, VkImage img,
+ VkImageLayout ol, VkPipelineStageFlags srcs, VkAccessFlags srca,
+ VkImageLayout nl, VkPipelineStageFlags dsts, VkAccessFlags dsta,
+ uint32_t src_family, uint32_t dst_family) {
+ VkImageMemoryBarrier barrier = {0};
+ barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ barrier.oldLayout = ol;
+ barrier.newLayout = nl;
+ barrier.image = img;
+ barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ barrier.subresourceRange.layerCount = 1;
+ barrier.subresourceRange.levelCount = 1;
+ barrier.srcAccessMask = srca;
+ barrier.dstAccessMask = dsta;
+ barrier.srcQueueFamilyIndex = src_family;
+ barrier.dstQueueFamilyIndex = dst_family;
+
+ vkCmdPipelineBarrier(cb, srcs, dsts, 0, 0, NULL, 0, NULL, 1, &barrier);
+}
+
+void vulkan_change_layout(VkCommandBuffer cb, VkImage img,
+ VkImageLayout ol, VkPipelineStageFlags srcs, VkAccessFlags srca,
+ VkImageLayout nl, VkPipelineStageFlags dsts, VkAccessFlags dsta) {
+ vulkan_change_layout_queue(cb, img, ol, srcs, srca, nl, dsts, dsta,
+ VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED);
+}
+
+bool vulkan_has_extension(size_t count, const char **exts, const char *find) {
+ for (unsigned i = 0; i < count; ++i) {
+ if (strcmp(exts[i], find) == 0u) {
+ return true;
+ }
+ }
+
+ return false;
+}
diff --git a/render/vulkan/vulkan.c b/render/vulkan/vulkan.c
new file mode 100644
index 00000000..4932ec4d
--- /dev/null
+++ b/render/vulkan/vulkan.c
@@ -0,0 +1,550 @@
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <xf86drm.h>
+#include <vulkan/vulkan.h>
+#include <wlr/util/log.h>
+#include <wlr/version.h>
+#include <wlr/config.h>
+#include "render/vulkan.h"
+
+// Returns the name of the first extension that could not be found,
+// or NULL if all were found.
+static const char *find_extensions(const VkExtensionProperties *avail,
+ unsigned availc, const char **req, unsigned reqc) {
+ // check if all required extensions are supported
+ for (size_t i = 0; i < reqc; ++i) {
+ bool found = false;
+ for (size_t j = 0; j < availc; ++j) {
+ if (!strcmp(avail[j].extensionName, req[i])) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ return req[i];
+ }
+ }
+
+ return NULL;
+}
+
+static VkBool32 debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
+ VkDebugUtilsMessageTypeFlagsEXT type,
+ const VkDebugUtilsMessengerCallbackDataEXT *debug_data,
+ void *data) {
+ // we ignore some of the non-helpful warnings
+ static const char *const ignored[] = {
+		// notifies us that shader output is not consumed, since the quad
+		// pipeline reuses the shared vertex shader but does not consume
+		// its uv output
+ "UNASSIGNED-CoreValidation-Shader-OutputNotConsumed",
+ };
+
+ if (debug_data->pMessageIdName) {
+ for (unsigned i = 0; i < sizeof(ignored) / sizeof(ignored[0]); ++i) {
+ if (strcmp(debug_data->pMessageIdName, ignored[i]) == 0) {
+ return false;
+ }
+ }
+ }
+
+ enum wlr_log_importance importance;
+ switch (severity) {
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT:
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT:
+ importance = WLR_ERROR;
+ break;
+ default:
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT:
+ importance = WLR_INFO;
+ break;
+ }
+
+ wlr_log(importance, "%s (%s)", debug_data->pMessage,
+ debug_data->pMessageIdName);
+ if (debug_data->queueLabelCount > 0) {
+ const char *name = debug_data->pQueueLabels[0].pLabelName;
+ if (name) {
+ wlr_log(importance, " last label '%s'", name);
+ }
+ }
+
+ for (unsigned i = 0; i < debug_data->objectCount; ++i) {
+ if (debug_data->pObjects[i].pObjectName) {
+			wlr_log(importance, " involving '%s'",
+				debug_data->pObjects[i].pObjectName);
+ }
+ }
+
+ return false;
+}
+
+
+// instance
+struct wlr_vk_instance *vulkan_instance_create(size_t ext_count,
+ const char **exts, bool debug) {
+ // we require vulkan 1.1
+ PFN_vkEnumerateInstanceVersion pfEnumInstanceVersion =
+ (PFN_vkEnumerateInstanceVersion)
+ vkGetInstanceProcAddr(VK_NULL_HANDLE, "vkEnumerateInstanceVersion");
+ if (!pfEnumInstanceVersion) {
+ wlr_log(WLR_ERROR, "wlroots requires vulkan 1.1 which is not available");
+ return NULL;
+ }
+
+ uint32_t ini_version;
+ if (pfEnumInstanceVersion(&ini_version) != VK_SUCCESS ||
+ ini_version < VK_API_VERSION_1_1) {
+ wlr_log(WLR_ERROR, "wlroots requires vulkan 1.1 which is not available");
+ return NULL;
+ }
+
+ // query extension support
+ uint32_t avail_extc = 0;
+ VkResult res;
+ res = vkEnumerateInstanceExtensionProperties(NULL, &avail_extc, NULL);
+ if ((res != VK_SUCCESS) || (avail_extc == 0)) {
+ wlr_vk_error("Could not enumerate instance extensions (1)", res);
+ return NULL;
+ }
+
+ VkExtensionProperties avail_ext_props[avail_extc + 1];
+ res = vkEnumerateInstanceExtensionProperties(NULL, &avail_extc,
+ avail_ext_props);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("Could not enumerate instance extensions (2)", res);
+ return NULL;
+ }
+
+ for (size_t j = 0; j < avail_extc; ++j) {
+ wlr_log(WLR_DEBUG, "Vulkan instance extension %s v%"PRIu32,
+ avail_ext_props[j].extensionName, avail_ext_props[j].specVersion);
+ }
+
+ // create instance
+ struct wlr_vk_instance *ini = calloc(1, sizeof(*ini));
+ if (!ini) {
+ wlr_log_errno(WLR_ERROR, "allocation failed");
+ return NULL;
+ }
+
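+ // reserve one extra slot so VK_EXT_debug_utils can be appended below
+ // when debugging is enabled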
+ bool debug_utils_found = false;
+ ini->extensions = calloc(1 + ext_count, sizeof(*ini->extensions));
+ if (!ini->extensions) {
+ wlr_log_errno(WLR_ERROR, "allocation failed");
+ goto error;
+ }
+
+ // find extensions
+ for (unsigned i = 0; i < ext_count; ++i) {
+ if (find_extensions(avail_ext_props, avail_extc, &exts[i], 1)) {
+ wlr_log(WLR_DEBUG, "vulkan instance extension %s not found",
+ exts[i]);
+ continue;
+ }
+
+ ini->extensions[ini->extension_count++] = exts[i];
+ }
+
+ if (debug) {
+ const char *name = VK_EXT_DEBUG_UTILS_EXTENSION_NAME;
+ if (find_extensions(avail_ext_props, avail_extc, &name, 1) == NULL) {
+ debug_utils_found = true;
+ ini->extensions[ini->extension_count++] = name;
+ }
+ }
+
+ VkApplicationInfo application_info = {0};
+ application_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
+ application_info.pEngineName = "wlroots";
+ application_info.engineVersion = WLR_VERSION_NUM;
+ application_info.apiVersion = VK_API_VERSION_1_1;
+
+ const char *layers[] = {
+ "VK_LAYER_KHRONOS_validation",
+ // "VK_LAYER_RENDERDOC_Capture",
+ // "VK_LAYER_live_introspection",
+ };
+
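+ // the layer list is only enabled in debug mode; note that
+ // vkCreateInstance fails with VK_ERROR_LAYER_NOT_PRESENT if a
+ // requested layer is not installed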
+ unsigned layer_count = debug * (sizeof(layers) / sizeof(layers[0]));
+
+ VkInstanceCreateInfo instance_info = {0};
+ instance_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+ instance_info.pApplicationInfo = &application_info;
+ instance_info.enabledExtensionCount = ini->extension_count;
+ instance_info.ppEnabledExtensionNames = ini->extensions;
+ instance_info.enabledLayerCount = layer_count;
+ instance_info.ppEnabledLayerNames = layers;
+
+ VkDebugUtilsMessageSeverityFlagsEXT severity =
+ // VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
+ VkDebugUtilsMessageTypeFlagsEXT types =
+ // VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
+
+ VkDebugUtilsMessengerCreateInfoEXT debug_info = {0};
+ debug_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
+ debug_info.messageSeverity = severity;
+ debug_info.messageType = types;
+ debug_info.pfnUserCallback = &debug_callback;
+ debug_info.pUserData = ini;
+
+ if (debug_utils_found) {
+ // passing the messenger create info via pNext of the instance create
+ // info additionally captures messages emitted during instance creation
+ // and destruction, which is useful for debugging layers/extensions
+ // not being found
+ instance_info.pNext = &debug_info;
+ }
+
+ res = vkCreateInstance(&instance_info, NULL, &ini->instance);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("Could not create instance", res);
+ goto error;
+ }
+
+ // debug callback
+ if (debug_utils_found) {
+ ini->api.createDebugUtilsMessengerEXT =
+ (PFN_vkCreateDebugUtilsMessengerEXT) vkGetInstanceProcAddr(
+ ini->instance, "vkCreateDebugUtilsMessengerEXT");
+ ini->api.destroyDebugUtilsMessengerEXT =
+ (PFN_vkDestroyDebugUtilsMessengerEXT) vkGetInstanceProcAddr(
+ ini->instance, "vkDestroyDebugUtilsMessengerEXT");
+
+ if (ini->api.createDebugUtilsMessengerEXT) {
+ ini->api.createDebugUtilsMessengerEXT(ini->instance,
+ &debug_info, NULL, &ini->messenger);
+ } else {
+ wlr_log(WLR_ERROR, "vkCreateDebugUtilsMessengerEXT not found");
+ }
+ }
+
+ return ini;
+
+error:
+ vulkan_instance_destroy(ini);
+ return NULL;
+}
+
+void vulkan_instance_destroy(struct wlr_vk_instance *ini) {
+ if (!ini) {
+ return;
+ }
+
+ if (ini->messenger && ini->api.destroyDebugUtilsMessengerEXT) {
+ ini->api.destroyDebugUtilsMessengerEXT(ini->instance,
+ ini->messenger, NULL);
+ }
+
+ if (ini->instance) {
+ vkDestroyInstance(ini->instance, NULL);
+ }
+
+ free(ini->extensions);
+ free(ini);
+}
+
+// physical device matching
+static void log_phdev(const VkPhysicalDeviceProperties *props) {
+ uint32_t vv_major = VK_VERSION_MAJOR(props->apiVersion);
+ uint32_t vv_minor = VK_VERSION_MINOR(props->apiVersion);
+ uint32_t vv_patch = VK_VERSION_PATCH(props->apiVersion);
+
+ uint32_t dv_major = VK_VERSION_MAJOR(props->driverVersion);
+ uint32_t dv_minor = VK_VERSION_MINOR(props->driverVersion);
+ uint32_t dv_patch = VK_VERSION_PATCH(props->driverVersion);
+
+ const char *dev_type = "unknown";
+ switch (props->deviceType) {
+ case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
+ dev_type = "integrated";
+ break;
+ case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
+ dev_type = "discrete";
+ break;
+ case VK_PHYSICAL_DEVICE_TYPE_CPU:
+ dev_type = "cpu";
+ break;
+ case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU:
+ dev_type = "vgpu";
+ break;
+ default:
+ break;
+ }
+
+ wlr_log(WLR_INFO, "Vulkan device: '%s'", props->deviceName);
+ wlr_log(WLR_INFO, " Device type: '%s'", dev_type);
+ wlr_log(WLR_INFO, " Supported API version: %u.%u.%u", vv_major, vv_minor, vv_patch);
+ wlr_log(WLR_INFO, " Driver version: %u.%u.%u", dv_major, dv_minor, dv_patch);
+}
+
+VkPhysicalDevice vulkan_find_drm_phdev(struct wlr_vk_instance *ini, int drm_fd) {
+ VkResult res;
+ uint32_t num_phdevs;
+
+ res = vkEnumeratePhysicalDevices(ini->instance, &num_phdevs, NULL);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("Could not retrieve physical devices", res);
+ return VK_NULL_HANDLE;
+ }
+
+ VkPhysicalDevice phdevs[1 + num_phdevs];
+ res = vkEnumeratePhysicalDevices(ini->instance, &num_phdevs, phdevs);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("Could not retrieve physical devices", res);
+ return VK_NULL_HANDLE;
+ }
+
+ struct stat drm_stat = {0};
+ if (fstat(drm_fd, &drm_stat) != 0) {
+ wlr_log_errno(WLR_ERROR, "fstat failed");
+ return VK_NULL_HANDLE;
+ }
+
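+ // compare st_rdev of the passed DRM fd against the DRM major/minor
+ // numbers each physical device reports via VK_EXT_physical_device_drm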
+ for (uint32_t i = 0; i < num_phdevs; ++i) {
+ VkPhysicalDevice phdev = phdevs[i];
+
+ // check whether device supports vulkan 1.1, needed for
+ // vkGetPhysicalDeviceProperties2
+ VkPhysicalDeviceProperties phdev_props;
+ vkGetPhysicalDeviceProperties(phdev, &phdev_props);
+
+ log_phdev(&phdev_props);
+
+ if (phdev_props.apiVersion < VK_API_VERSION_1_1) {
+ // NOTE: we could additionally check whether the
+ // VK_KHR_get_physical_device_properties2 extension is supported,
+ // but implementations that don't support 1.1 are unlikely going forward
+ continue;
+ }
+
+ // check for extensions
+ uint32_t avail_extc = 0;
+ res = vkEnumerateDeviceExtensionProperties(phdev, NULL,
+ &avail_extc, NULL);
+ if ((res != VK_SUCCESS) || (avail_extc == 0)) {
+ wlr_vk_error(" Could not enumerate device extensions", res);
+ continue;
+ }
+
+ VkExtensionProperties avail_ext_props[avail_extc + 1];
+ res = vkEnumerateDeviceExtensionProperties(phdev, NULL,
+ &avail_extc, avail_ext_props);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error(" Could not enumerate device extensions", res);
+ continue;
+ }
+
+ const char *name = VK_EXT_PHYSICAL_DEVICE_DRM_EXTENSION_NAME;
+ if (find_extensions(avail_ext_props, avail_extc, &name, 1) != NULL) {
+ wlr_log(WLR_DEBUG, " Ignoring physical device \"%s\": "
+ "VK_EXT_physical_device_drm not supported",
+ phdev_props.deviceName);
+ continue;
+ }
+
+ VkPhysicalDeviceDrmPropertiesEXT drm_props = {0};
+ drm_props.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT;
+
+ VkPhysicalDeviceProperties2 props = {0};
+ props.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
+ props.pNext = &drm_props;
+
+ vkGetPhysicalDeviceProperties2(phdev, &props);
+
+ dev_t primary_devid = makedev(drm_props.primaryMajor, drm_props.primaryMinor);
+ dev_t render_devid = makedev(drm_props.renderMajor, drm_props.renderMinor);
+ if (primary_devid == drm_stat.st_rdev ||
+ render_devid == drm_stat.st_rdev) {
+ wlr_log(WLR_INFO, "Found matching Vulkan physical device: %s",
+ phdev_props.deviceName);
+ return phdev;
+ }
+ }
+
+ return VK_NULL_HANDLE;
+}
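+
+// A minimal usage sketch (hypothetical caller code; assumes the
+// compositor already owns a DRM fd, error handling elided):
+//
+// struct wlr_vk_instance *ini = vulkan_instance_create(0, NULL, debug);
+// VkPhysicalDevice phdev = vulkan_find_drm_phdev(ini, drm_fd);
+// struct wlr_vk_device *dev = vulkan_device_create(ini, phdev, 0, NULL);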
+
+struct wlr_vk_device *vulkan_device_create(struct wlr_vk_instance *ini,
+ VkPhysicalDevice phdev, size_t ext_count, const char **exts) {
+ VkResult res;
+
+ // check for extensions
+ uint32_t avail_extc = 0;
+ res = vkEnumerateDeviceExtensionProperties(phdev, NULL,
+ &avail_extc, NULL);
+ if (res != VK_SUCCESS || avail_extc == 0) {
+ wlr_vk_error("Could not enumerate device extensions (1)", res);
+ return NULL;
+ }
+
+ VkExtensionProperties avail_ext_props[avail_extc + 1];
+ res = vkEnumerateDeviceExtensionProperties(phdev, NULL,
+ &avail_extc, avail_ext_props);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("Could not enumerate device extensions (2)", res);
+ return NULL;
+ }
+
+ for (size_t j = 0; j < avail_extc; ++j) {
+ wlr_log(WLR_DEBUG, "Vulkan device extension %s v%"PRIu32,
+ avail_ext_props[j].extensionName, avail_ext_props[j].specVersion);
+ }
+
+ // create device
+ struct wlr_vk_device *dev = calloc(1, sizeof(*dev));
+ if (!dev) {
+ wlr_log_errno(WLR_ERROR, "allocation failed");
+ return NULL;
+ }
+
+ dev->phdev = phdev;
+ dev->instance = ini;
+ dev->drm_fd = -1;
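+ // sized with headroom for both the caller's extensions and the
+ // extensions appended unconditionally below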
+ dev->extensions = calloc(16 + ext_count, sizeof(*dev->extensions));
+ if (!dev->extensions) {
+ wlr_log_errno(WLR_ERROR, "allocation failed");
+ goto error;
+ }
+
+ // find extensions
+ for (unsigned i = 0; i < ext_count; ++i) {
+ if (find_extensions(avail_ext_props, avail_extc, &exts[i], 1)) {
+ wlr_log(WLR_DEBUG, "vulkan device extension %s not found",
+ exts[i]);
+ continue;
+ }
+
+ dev->extensions[dev->extension_count++] = exts[i];
+ }
+
+ // For dmabuf import we require at least the external_memory_fd,
+ // external_memory_dma_buf, queue_family_foreign and
+ // image_drm_format_modifier extensions; image_format_list is a
+ // dependency of image_drm_format_modifier (core in Vulkan 1.2).
+ const char *names[] = {
+ VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
+ VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME, // or vulkan 1.2
+ VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME,
+ VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME,
+ VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME,
+ };
+
+ unsigned nc = sizeof(names) / sizeof(names[0]);
+ const char *not_found = find_extensions(avail_ext_props, avail_extc, names, nc);
+ if (not_found) {
+ wlr_log(WLR_ERROR, "vulkan: required device extension %s not found",
+ not_found);
+ goto error;
+ }
+
+ for (unsigned i = 0u; i < nc; ++i) {
+ dev->extensions[dev->extension_count++] = names[i];
+ }
+
+ // queue families
+ {
+ uint32_t qfam_count;
+ vkGetPhysicalDeviceQueueFamilyProperties(phdev, &qfam_count, NULL);
+ assert(qfam_count > 0);
+ VkQueueFamilyProperties queue_props[qfam_count];
+ vkGetPhysicalDeviceQueueFamilyProperties(phdev, &qfam_count,
+ queue_props);
+
+ bool graphics_found = false;
+ for (unsigned i = 0u; i < qfam_count; ++i) {
+ graphics_found = queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT;
+ if (graphics_found) {
+ dev->queue_family = i;
+ break;
+ }
+ }
+
+ if (!graphics_found) {
+ wlr_log(WLR_ERROR, "Could not find a graphics queue family");
+ goto error;
+ }
+ }
+
+ const float prio = 1.f;
+ VkDeviceQueueCreateInfo qinfo = {0};
+ qinfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ qinfo.queueFamilyIndex = dev->queue_family;
+ qinfo.queueCount = 1;
+ qinfo.pQueuePriorities = &prio;
+
+ VkDeviceCreateInfo dev_info = {0};
+ dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
+ dev_info.queueCreateInfoCount = 1u;
+ dev_info.pQueueCreateInfos = &qinfo;
+ dev_info.enabledExtensionCount = dev->extension_count;
+ dev_info.ppEnabledExtensionNames = dev->extensions;
+
+ res = vkCreateDevice(phdev, &dev_info, NULL, &dev->dev);
+ if (res != VK_SUCCESS) {
+ wlr_vk_error("Failed to create vulkan device", res);
+ goto error;
+ }
+
+ vkGetDeviceQueue(dev->dev, dev->queue_family, 0, &dev->queue);
+
+ // load api
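+ // vkGetMemoryFdPropertiesKHR is provided by VK_KHR_external_memory_fd
+ // and is required to query which memory types a dmabuf fd can be
+ // imported with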
+ dev->api.getMemoryFdPropertiesKHR = (PFN_vkGetMemoryFdPropertiesKHR)
+ vkGetDeviceProcAddr(dev->dev, "vkGetMemoryFdPropertiesKHR");
+
+ if (!dev->api.getMemoryFdPropertiesKHR) {
+ wlr_log(WLR_ERROR, "Failed to retrieve required dev function pointers");
+ goto error;
+ }
+
+ // - check device format support -
+ size_t max_fmts;
+ const struct wlr_vk_format *fmts = vulkan_get_format_list(&max_fmts);
+ dev->shm_formats = calloc(max_fmts, sizeof(*dev->shm_formats));
+ dev->format_props = calloc(max_fmts, sizeof(*dev->format_props));
+ if (!dev->shm_formats || !dev->format_props) {
+ wlr_log_errno(WLR_ERROR, "allocation failed");
+ goto error;
+ }
+
+ for (unsigned i = 0u; i < max_fmts; ++i) {
+ vulkan_format_props_query(dev, &fmts[i]);
+ }
+
+ return dev;
+
+error:
+ vulkan_device_destroy(dev);
+ return NULL;
+}
+
+void vulkan_device_destroy(struct wlr_vk_device *dev) {
+ if (!dev) {
+ return;
+ }
+
+ if (dev->dev) {
+ vkDestroyDevice(dev->dev, NULL);
+ }
+
+ if (dev->drm_fd >= 0) {
+ close(dev->drm_fd);
+ }
+
+ wlr_drm_format_set_finish(&dev->dmabuf_render_formats);
+ wlr_drm_format_set_finish(&dev->dmabuf_texture_formats);
+
+ for (unsigned i = 0u; i < dev->format_prop_count; ++i) {
+ vulkan_format_props_finish(&dev->format_props[i]);
+ }
+
+ free(dev->extensions);
+ free(dev->shm_formats);
+ free(dev->format_props);
+ free(dev);
+}
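+
+// NOTE: Vulkan's object lifetime rules require every wlr_vk_device (and
+// its VkDevice) to be destroyed before the wlr_vk_instance it was
+// created from.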
diff --git a/render/wlr_renderer.c b/render/wlr_renderer.c
index 5818cd4a..8a1f1e94 100644
--- a/render/wlr_renderer.c
+++ b/render/wlr_renderer.c
@@ -17,6 +17,10 @@
#include <wlr/render/gles2.h>
#endif
+#if WLR_HAS_VULKAN_RENDERER
+#include <wlr/render/vulkan.h>
+#endif // WLR_HAS_VULKAN_RENDERER
+
#include "util/signal.h"
#include "render/pixel_format.h"
#include "render/wlr_renderer.h"
@@ -258,6 +262,11 @@ struct wlr_renderer *renderer_autocreate_with_drm_fd(int drm_fd) {
return wlr_gles2_renderer_create_with_drm_fd(drm_fd);
}
#endif
+#if WLR_HAS_VULKAN_RENDERER
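+	// name comes from the WLR_RENDERER environment variable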
+ if (strcmp(name, "vulkan") == 0) {
+ return wlr_vk_renderer_create_with_drm_fd(drm_fd);
+ }
+#endif
if (strcmp(name, "pixman") == 0) {
return wlr_pixman_renderer_create();
}