From 36bd4795d4fe2282dfcc59f26863bac2896a4a3f Mon Sep 17 00:00:00 2001 From: emersion Date: Tue, 22 May 2018 17:38:05 +0100 Subject: export-dmabuf: add basic and incomplete implementation --- protocol/meson.build | 3 +- protocol/wlr-export-dmabuf-unstable-v1.xml | 230 +++++++++++++++++++++++++++++ 2 files changed, 232 insertions(+), 1 deletion(-) create mode 100644 protocol/wlr-export-dmabuf-unstable-v1.xml (limited to 'protocol') diff --git a/protocol/meson.build b/protocol/meson.build index 8fa64ca9..a14e9723 100644 --- a/protocol/meson.build +++ b/protocol/meson.build @@ -39,8 +39,9 @@ protocols = [ 'screenshooter.xml', 'server-decoration.xml', 'virtual-keyboard-unstable-v1.xml', - 'wlr-layer-shell-unstable-v1.xml', + 'wlr-export-dmabuf-unstable-v1.xml', 'wlr-input-inhibitor-unstable-v1.xml', + 'wlr-layer-shell-unstable-v1.xml', ] client_protocols = [ diff --git a/protocol/wlr-export-dmabuf-unstable-v1.xml b/protocol/wlr-export-dmabuf-unstable-v1.xml new file mode 100644 index 00000000..6332b146 --- /dev/null +++ b/protocol/wlr-export-dmabuf-unstable-v1.xml @@ -0,0 +1,230 @@ + + + + + Copyright © 2018 Rostislav Pehlivanov + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice (including the next + paragraph) shall be included in all copies or substantial portions of the + Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + + + An interface to capture surfaces in an efficient way. + Overall usage: + + 1.) client registers with zwlr_screencontent_manager_v1 + 2.) server sends client info about surfaces via "receive_surface_info" + 3.) client subscribes to capture a surface via the "capture" requests + 4.) server sends client events via the "zwlr_screencontent_frame" interface + 5.) client finishes and informs server via the "frame_destroy" event + 6.) client optionally resubscribes via repeating steps 3.) through 5.) + + + + + This object represents a frame which is ready to have its resources + fetched and used. + + The receive callback shall be called first, followed by either the + "dma_object" callback once per object or the "dma_layer" callback, + once per layer. The "dma_plane" callback shall only be called after + the "dma_layer" callback corresponding to the layer the plane belongs + to has been called. Finally, the "ready" event is called to indicate that + all the data has been made available for readout, as well as the time + at which presentation happened at. + The ownership of the frame is passed to the client, who's responsible for + destroying it via the "destroy" event once finished. + The data the API describes has been based off of what + VASurfaceAttribExternalBuffers contains. 
+ All frames are read-only and may not be written into or altered. + + + + + Special flags that must be respected by the client. + Transient frames indicate short lifetime frames (such as swapchain + images from external clients). Clients are advised to copy them and do + all processing outside of the "ready" event. + + + + + + + Main callback supplying the client with information about the frame, + as well as an object to serve as context for destruction. Always called + first before any other events. + + The "transform" argument describes the orientation needed to be applied + to correctly orient the buffer. For example, a buffer rotated by 90 + degrees will have a value of "3" here, corresponding to the need to + apply a 270 degree transpose to correctly present the buffer. + + + + + + + + + + + + + + + Callback which serves to supply the client with the file descriptors + containing the data for each object. + + + + + + + + Callback which serves to supply the client with information on what's + contained in each file descriptor and how its laid out. + Will be called after the main receive event, once per layer. + + + + + + + + Callback which supplies the client with plane information for each + layer. + + + + + + + + + + Called as soon as the frame is presented, indicating it is available + for reading. + The timestamp is expressed as tv_sec_hi, tv_sec_lo, tv_nsec triples, + each component being an unsigned 32-bit value. Whole seconds are in + tv_sec which is a 64-bit value combined from tv_sec_hi and tv_sec_lo, + and the additional fractional part in tv_nsec as nanoseconds. Hence, + for valid timestamps tv_nsec must be in [0, 999999999]. + The seconds part may have an arbitrary offset at start. + + + + + + + + If the frame is no longer valid after the "frame" event has been called, + this callback will be used to inform the client to scrap the frame. + Source is still valid for as long as the subscription function does not + return NULL. + This may get called if for instance the surface is in the process of + resizing. + + + + + + Unreferences the frame, allowing it to be reused. Must be called as soon + as its no longer used. + + + + + + + This object is a manager which informs clients about capturable windows + and is able to create callbacks from which to begin to receive content + from. The "title" argument in the "surface_info" event shall be used + to provide a user-readable identifier such as a window title or + program name. + + + + + This will be called whenever a surface that's able to be captured + appears. + + + + + + + Called if a surface becomes unavailable to capture, for example if has + been closed. + + + + + + + Request to start capturing from a surface with a given id. + If an ID becomes unavailable, a NULL will be returned. + + + + + + + + Request to start capturing from an entire wl_output. + If an output becomes unavailable, a NULL will be returned. 
+ + + + + + -- cgit v1.2.3 From 1377e551ef583976142bfb98a1ba5b61f1cc1196 Mon Sep 17 00:00:00 2001 From: emersion Date: Wed, 23 May 2018 22:11:45 +0100 Subject: Update wlr-export-dmabuf protocol --- protocol/wlr-export-dmabuf-unstable-v1.xml | 66 ++++++++++++++++++++---------- types/wlr_export_dmabuf_v1.c | 14 ++++--- 2 files changed, 52 insertions(+), 28 deletions(-) (limited to 'protocol') diff --git a/protocol/wlr-export-dmabuf-unstable-v1.xml b/protocol/wlr-export-dmabuf-unstable-v1.xml index 6332b146..ab9694a6 100644 --- a/protocol/wlr-export-dmabuf-unstable-v1.xml +++ b/protocol/wlr-export-dmabuf-unstable-v1.xml @@ -24,7 +24,7 @@ DEALINGS IN THE SOFTWARE. - + An interface to capture surfaces in an efficient way. Overall usage: @@ -41,28 +41,27 @@ This object represents a frame which is ready to have its resources fetched and used. - The receive callback shall be called first, followed by either the - "dma_object" callback once per object or the "dma_layer" callback, - once per layer. The "dma_plane" callback shall only be called after - the "dma_layer" callback corresponding to the layer the plane belongs - to has been called. Finally, the "ready" event is called to indicate that - all the data has been made available for readout, as well as the time - at which presentation happened at. - The ownership of the frame is passed to the client, who's responsible for - destroying it via the "destroy" event once finished. + The receive callback shall be called first, followed by the "object" + callback once per dmabuf object or the "layer" callback, once per dmabuf + layer. The "plane" callback shall only be called after the "layer" + callback corresponding to the layer the plane belongs to has been called + Finally, the "ready" event is called to indicate that all the data has + been made available for readout, as well as the time at which presentation + happened at. The ownership of the frame is passed to the client, who's + responsible for destroying it via the "destroy" event once finished. The data the API describes has been based off of what VASurfaceAttribExternalBuffers contains. All frames are read-only and may not be written into or altered. - + Special flags that must be respected by the client. Transient frames indicate short lifetime frames (such as swapchain images from external clients). Clients are advised to copy them and do all processing outside of the "ready" event. - + @@ -85,10 +84,10 @@ - - @@ -99,7 +98,7 @@ - + Callback which serves to supply the client with the file descriptors containing the data for each object. @@ -111,7 +110,7 @@ - + Callback which serves to supply the client with information on what's contained in each file descriptor and how its laid out. @@ -124,7 +123,7 @@ - + Callback which supplies the client with plane information for each layer. @@ -158,6 +157,21 @@ + + + + Indicates reason for aborting the frame. + + + + + + If the frame is no longer valid after the "frame" event has been called, @@ -167,6 +181,8 @@ This may get called if for instance the surface is in the process of resizing. + @@ -208,23 +224,29 @@ Request to start capturing from a surface with a given id. - If an ID becomes unavailable, a NULL will be returned. + - Request to start capturing from an entire wl_output. - If an output becomes unavailable, a NULL will be returned. - + + + + + + All objects created by the manager will still remain valid, until their + appropriate destroy request has been called. 
+ diff --git a/types/wlr_export_dmabuf_v1.c b/types/wlr_export_dmabuf_v1.c index 2f1c88f2..4a7382a3 100644 --- a/types/wlr_export_dmabuf_v1.c +++ b/types/wlr_export_dmabuf_v1.c @@ -61,13 +61,13 @@ static struct wlr_export_dmabuf_manager_v1 *manager_from_resource( static void manager_handle_capture_client(struct wl_client *client, struct wl_resource *manager_resource, uint32_t id, - uint32_t client_id, int32_t overlay_cursor) { + int32_t overlay_cursor, uint32_t client_id) { // TODO } static void manager_handle_capture_output(struct wl_client *client, struct wl_resource *manager_resource, uint32_t id, - struct wl_resource *output_resource) { + int32_t overlay_cursor, struct wl_resource *output_resource) { struct wlr_export_dmabuf_manager_v1 *manager = manager_from_resource(manager_resource); struct wlr_output *output = wlr_output_from_resource(output_resource); @@ -95,7 +95,9 @@ static void manager_handle_capture_output(struct wl_client *client, struct wlr_dmabuf_buffer_attribs attribs; if (!wlr_output_export_dmabuf(output, &attribs)) { - zwlr_export_dmabuf_frame_v1_send_abort(frame->resource); + wl_list_init(&frame->output_swap_buffers.link); + // TODO: abort reason + zwlr_export_dmabuf_frame_v1_send_abort(frame->resource, 0); return; } @@ -109,16 +111,16 @@ static void manager_handle_capture_output(struct wl_client *client, output->width, output->height, output->scale, output->transform, attribs.flags, frame_flags, mod_high, mod_low, attribs.n_planes, 1); - zwlr_export_dmabuf_frame_v1_send_dma_layer(frame->resource, 0, + zwlr_export_dmabuf_frame_v1_send_layer(frame->resource, 0, attribs.format, 1); for (int i = 0; i < attribs.n_planes; ++i) { // TODO: what to do if the kernel doesn't support seek on buffer off_t size = lseek(attribs.fd[i], 0, SEEK_END); - zwlr_export_dmabuf_frame_v1_send_dma_object(frame->resource, i, + zwlr_export_dmabuf_frame_v1_send_object(frame->resource, i, attribs.fd[i], size); - zwlr_export_dmabuf_frame_v1_send_dma_plane(frame->resource, i, 0, i, + zwlr_export_dmabuf_frame_v1_send_plane(frame->resource, i, 0, i, attribs.offset[i], attribs.stride[i]); } -- cgit v1.2.3 From a16ad4327a07157ef2477e036456e8533c47a16e Mon Sep 17 00:00:00 2001 From: emersion Date: Sat, 26 May 2018 08:15:49 +0100 Subject: Update protocol --- protocol/wlr-export-dmabuf-unstable-v1.xml | 117 ++++++++--------------------- types/wlr_export_dmabuf_v1.c | 24 ++---- 2 files changed, 40 insertions(+), 101 deletions(-) (limited to 'protocol') diff --git a/protocol/wlr-export-dmabuf-unstable-v1.xml b/protocol/wlr-export-dmabuf-unstable-v1.xml index ab9694a6..760345a7 100644 --- a/protocol/wlr-export-dmabuf-unstable-v1.xml +++ b/protocol/wlr-export-dmabuf-unstable-v1.xml @@ -42,26 +42,21 @@ fetched and used. The receive callback shall be called first, followed by the "object" - callback once per dmabuf object or the "layer" callback, once per dmabuf - layer. The "plane" callback shall only be called after the "layer" - callback corresponding to the layer the plane belongs to has been called - Finally, the "ready" event is called to indicate that all the data has + callback once per dmabuf object or the "plane" callback, once per dmabuf + plane. The "ready" event is called last to indicate that all the data has been made available for readout, as well as the time at which presentation happened at. The ownership of the frame is passed to the client, who's - responsible for destroying it via the "destroy" event once finished. 
- The data the API describes has been based off of what - VASurfaceAttribExternalBuffers contains. + responsible for destroying it via the "destroy" event once finished and + by calling close() on the file descriptors received. All frames are read-only and may not be written into or altered. - Special flags that must be respected by the client. - Transient frames indicate short lifetime frames (such as swapchain - images from external clients). Clients are advised to copy them and do - all processing outside of the "ready" event. + Special flags that should be respected by the client. - + @@ -79,24 +74,25 @@ summary="frame width, scaling factor included"/> - - + + + - + @@ -110,28 +106,13 @@ - - - Callback which serves to supply the client with information on what's - contained in each file descriptor and how its laid out. - Will be called after the main receive event, once per layer. - - - - - Callback which supplies the client with plane information for each - layer. + plane. - - - - Indicates reason for aborting the frame. + + + Indicates reason for cancelling the frame. - - - + + + - + If the frame is no longer valid after the "frame" event has been called, this callback will be used to inform the client to scrap the frame. @@ -181,57 +160,27 @@ This may get called if for instance the surface is in the process of resizing. - + Unreferences the frame, allowing it to be reused. Must be called as soon as its no longer used. + Can be called at any time by the client after the "frame" event, after + which the compositor will not call any other events unless the client + resubscribes to capture more. The client will still have to close any + FDs it has been given. - This object is a manager which informs clients about capturable windows - and is able to create callbacks from which to begin to receive content - from. The "title" argument in the "surface_info" event shall be used - to provide a user-readable identifier such as a window title or - program name. + This object is a manager with which to start capturing from sources. - - - This will be called whenever a surface that's able to be captured - appears. - - - - - - - Called if a surface becomes unavailable to capture, for example if has - been closed. - - - - - - - Request to start capturing from a surface with a given id. - - - - - Request to start capturing from an entire wl_output. 
diff --git a/types/wlr_export_dmabuf_v1.c b/types/wlr_export_dmabuf_v1.c index 572da262..a2faf2ff 100644 --- a/types/wlr_export_dmabuf_v1.c +++ b/types/wlr_export_dmabuf_v1.c @@ -61,12 +61,6 @@ static struct wlr_export_dmabuf_manager_v1 *manager_from_resource( return wl_resource_get_user_data(resource); } -static void manager_handle_capture_client(struct wl_client *client, - struct wl_resource *manager_resource, uint32_t id, - int32_t overlay_cursor, uint32_t client_id) { - // TODO -} - static void manager_handle_capture_output(struct wl_client *client, struct wl_resource *manager_resource, uint32_t id, int32_t overlay_cursor, struct wl_resource *output_resource) { @@ -98,30 +92,27 @@ static void manager_handle_capture_output(struct wl_client *client, struct wlr_dmabuf_buffer_attribs *attribs = &frame->attribs; if (!wlr_output_export_dmabuf(output, attribs)) { - // TODO: abort reason - zwlr_export_dmabuf_frame_v1_send_abort(frame->resource, 0); + zwlr_export_dmabuf_frame_v1_send_cancel(frame->resource, + ZWLR_EXPORT_DMABUF_FRAME_V1_CANCEL_REASON_TEMPORARY); return; } assert(attribs->n_planes > 0); - uint32_t frame_flags = 0; + uint32_t frame_flags = ZWLR_EXPORT_DMABUF_FRAME_V1_FLAGS_TRANSIENT; uint32_t mod_high = attribs->modifier[0] >> 32; uint32_t mod_low = attribs->modifier[0] & 0xFFFFFFFF; zwlr_export_dmabuf_frame_v1_send_frame(frame->resource, - output->width, output->height, output->scale, output->transform, - attribs->flags, frame_flags, mod_high, mod_low, attribs->n_planes, 1); - - zwlr_export_dmabuf_frame_v1_send_layer(frame->resource, 0, - attribs->format, attribs->n_planes); + output->width, output->height, 0, 0, attribs->flags, frame_flags, + attribs->format, mod_high, mod_low, attribs->n_planes, + attribs->n_planes); for (int i = 0; i < attribs->n_planes; ++i) { - // TODO: what to do if the kernel doesn't support seek on buffer off_t size = lseek(attribs->fd[i], 0, SEEK_END); zwlr_export_dmabuf_frame_v1_send_object(frame->resource, i, attribs->fd[i], size); - zwlr_export_dmabuf_frame_v1_send_plane(frame->resource, i, 0, i, + zwlr_export_dmabuf_frame_v1_send_plane(frame->resource, i, i, attribs->offset[i], attribs->stride[i]); } @@ -131,7 +122,6 @@ static void manager_handle_capture_output(struct wl_client *client, } static const struct zwlr_export_dmabuf_manager_v1_interface manager_impl = { - .capture_client = manager_handle_capture_client, .capture_output = manager_handle_capture_output, }; -- cgit v1.2.3 From b9b397ef8094b221bc1042aedf0dbbbb5d9a5f1e Mon Sep 17 00:00:00 2001 From: Rostislav Pehlivanov Date: Sun, 27 May 2018 04:03:29 +0100 Subject: Add a demo client for dmabuf export --- examples/dmabuf-capture.c | 767 ++++++++++++++++++++++++++++++++++++++++++++++ examples/meson.build | 10 + protocol/meson.build | 1 + 3 files changed, 778 insertions(+) create mode 100644 examples/dmabuf-capture.c (limited to 'protocol') diff --git a/examples/dmabuf-capture.c b/examples/dmabuf-capture.c new file mode 100644 index 00000000..f249d437 --- /dev/null +++ b/examples/dmabuf-capture.c @@ -0,0 +1,767 @@ +#define _XOPEN_SOURCE 700 +#define _POSIX_C_SOURCE 199309L +#include +#include +#include +#include +#include +#include + +#include "wlr-export-dmabuf-unstable-v1-client-protocol.h" + +#include +#include +#include +#include + +struct wayland_output { + struct wl_list link; + uint32_t id; + struct wl_output *output; + char *make; + char *model; + int width; + int height; + AVRational framerate; +}; + +struct capture_context { + AVClass *class; /* For pretty logging */ + struct 
wl_display *display; + struct wl_registry *registry; + struct zwlr_export_dmabuf_manager_v1 *export_manager; + + struct wl_list output_list; + + /* Target */ + struct wl_output *target_output; + uint32_t target_client; + + /* Main frame callback */ + struct zwlr_export_dmabuf_frame_v1 *frame_callback; + + /* If something happens during capture */ + int err; + int quit; + + /* FFmpeg specific parts */ + AVFrame *current_frame; + AVBufferRef *drm_device_ref; + AVBufferRef *drm_frames_ref; + + AVBufferRef *mapped_device_ref; + AVBufferRef *mapped_frames_ref; + + AVFormatContext *avf; + AVCodecContext *avctx; + + int64_t start_pts; + + /* Config */ + enum AVPixelFormat software_format; + enum AVHWDeviceType hw_device_type; + AVDictionary *encoder_opts; + int is_software_encoder; + char *hardware_device; + char *out_filename; + char *encoder_name; + float out_bitrate; +}; + +static void output_handle_geometry(void *data, struct wl_output *wl_output, + int32_t x, int32_t y, int32_t phys_width, int32_t phys_height, + int32_t subpixel, const char *make, const char *model, + int32_t transform) { + struct wayland_output *output = data; + output->make = av_strdup(make); + output->model = av_strdup(model); +} + +static void output_handle_mode(void *data, struct wl_output *wl_output, + uint32_t flags, int32_t width, int32_t height, int32_t refresh) { + if (flags & WL_OUTPUT_MODE_CURRENT) { + struct wayland_output *output = data; + output->width = width; + output->height = height; + output->framerate = (AVRational){ refresh, 1000 }; + } +} + +static void output_handle_done(void* data, struct wl_output *wl_output) { + /* Nothing to do */ +} + +static void output_handle_scale(void* data, struct wl_output *wl_output, + int32_t factor) { + /* Nothing to do */ +} + +static const struct wl_output_listener output_listener = { + output_handle_geometry, + output_handle_mode, + output_handle_done, + output_handle_scale, +}; + +static void registry_handle_add(void *data, struct wl_registry *reg, + uint32_t id, const char *interface, uint32_t ver) { + struct capture_context *ctx = data; + + if (!strcmp(interface, wl_output_interface.name)) { + struct wayland_output *output = av_mallocz(sizeof(*output)); + + output->id = id; + output->output = wl_registry_bind(reg, id, &wl_output_interface, 1); + + wl_output_add_listener(output->output, &output_listener, output); + wl_list_insert(&ctx->output_list, &output->link); + } + + if (!strcmp(interface, zwlr_export_dmabuf_manager_v1_interface.name)) { + ctx->export_manager = wl_registry_bind(reg, id, + &zwlr_export_dmabuf_manager_v1_interface, 1); + } +} + +static void remove_output(struct wayland_output *out) { + wl_list_remove(&out->link); + av_free(out->make); + av_free(out->model); + av_free(out); + return; +} + +static struct wayland_output *find_output(struct capture_context *ctx, + struct wl_output *out, uint32_t id) { + struct wayland_output *output, *tmp; + wl_list_for_each_safe(output, tmp, &ctx->output_list, link) + if ((output->output == out) || (output->id == id)) + return output; + return NULL; +} + +static void registry_handle_remove(void *data, struct wl_registry *reg, + uint32_t id) { + remove_output(find_output((struct capture_context *)data, NULL, id)); +} + +static const struct wl_registry_listener registry_listener = { + registry_handle_add, + registry_handle_remove, +}; + +static void frame_free(void *opaque, uint8_t *data) { + AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)data; + + for (int i = 0; i < desc->nb_objects; ++i) { + 
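	/* The protocol hands ownership of every dmabuf fd to the client, so each
	 * object's fd is closed here before the frame object is destroyed. */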
close(desc->objects[i].fd); + } + + zwlr_export_dmabuf_frame_v1_destroy(opaque); + + av_free(data); +} + +static void frame_start(void *data, struct zwlr_export_dmabuf_frame_v1 *frame, + uint32_t width, uint32_t height, uint32_t offset_x, uint32_t offset_y, + uint32_t buffer_flags, uint32_t flags, uint32_t format, + uint32_t mod_high, uint32_t mod_low, uint32_t num_objects, + uint32_t num_planes) { + struct capture_context *ctx = data; + int err = 0; + + /* Allocate DRM specific struct */ + AVDRMFrameDescriptor *desc = av_mallocz(sizeof(*desc)); + if (!desc) { + err = AVERROR(ENOMEM); + goto fail; + } + + desc->nb_objects = num_objects; + desc->objects[0].format_modifier = ((uint64_t)mod_high << 32) | mod_low; + + desc->nb_layers = 1; + desc->layers[0].format = format; + desc->layers[0].nb_planes = num_planes; + + /* Allocate a frame */ + AVFrame *f = av_frame_alloc(); + if (!f) { + err = AVERROR(ENOMEM); + goto fail; + } + + /* Set base frame properties */ + ctx->current_frame = f; + f->width = width; + f->height = height; + f->format = AV_PIX_FMT_DRM_PRIME; + + /* Set the frame data to the DRM specific struct */ + f->buf[0] = av_buffer_create((uint8_t*)desc, sizeof(*desc), + &frame_free, frame, 0); + if (!f->buf[0]) { + err = AVERROR(ENOMEM); + goto fail; + } + + f->data[0] = (uint8_t*)desc; + + return; + +fail: + ctx->err = err; + frame_free(frame, (uint8_t *)desc); +} + +static void frame_object(void *data, struct zwlr_export_dmabuf_frame_v1 *frame, + uint32_t index, int32_t fd, uint32_t size) { + struct capture_context *ctx = data; + AVFrame *f = ctx->current_frame; + AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)f->data[0]; + + desc->objects[index].fd = fd; + desc->objects[index].size = size; +} + +static void frame_plane(void *data, struct zwlr_export_dmabuf_frame_v1 *frame, + uint32_t index, uint32_t object_index, + uint32_t offset, uint32_t stride) { + struct capture_context *ctx = data; + AVFrame *f = ctx->current_frame; + AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)f->data[0]; + + desc->layers[0].planes[index].object_index = object_index; + desc->layers[0].planes[index].offset = offset; + desc->layers[0].planes[index].pitch = stride; +} + +static const uint32_t pixfmt_to_drm_map[] = { + [AV_PIX_FMT_NV12] = WL_SHM_FORMAT_NV12, + [AV_PIX_FMT_BGRA] = WL_SHM_FORMAT_ARGB8888, + [AV_PIX_FMT_BGR0] = WL_SHM_FORMAT_XRGB8888, + [AV_PIX_FMT_RGBA] = WL_SHM_FORMAT_ABGR8888, + [AV_PIX_FMT_RGB0] = WL_SHM_FORMAT_XBGR8888, + [AV_PIX_FMT_ABGR] = WL_SHM_FORMAT_RGBA8888, + [AV_PIX_FMT_0BGR] = WL_SHM_FORMAT_RGBX8888, + [AV_PIX_FMT_ARGB] = WL_SHM_FORMAT_BGRA8888, + [AV_PIX_FMT_0RGB] = WL_SHM_FORMAT_BGRX8888, +}; + +static enum AVPixelFormat drm_fmt_to_pixfmt(uint32_t fmt) { + for (enum AVPixelFormat i = 0; i < AV_PIX_FMT_NB; i++) { + if (pixfmt_to_drm_map[i] == fmt) { + return i; + } + } + return AV_PIX_FMT_NONE; +} + +static int attach_drm_frames_ref(struct capture_context *ctx, AVFrame *f, + enum AVPixelFormat sw_format) { + int err = 0; + AVHWFramesContext *hwfc; + + if (ctx->drm_frames_ref) { + hwfc = (AVHWFramesContext*)ctx->drm_frames_ref->data; + if (hwfc->width == f->width && hwfc->height == f->height && + hwfc->sw_format == sw_format) { + goto attach; + } + av_buffer_unref(&ctx->drm_frames_ref); + } + + ctx->drm_frames_ref = av_hwframe_ctx_alloc(ctx->drm_device_ref); + if (!ctx->drm_frames_ref) { + err = AVERROR(ENOMEM); + goto fail; + } + + hwfc = (AVHWFramesContext*)ctx->drm_frames_ref->data; + + hwfc->format = f->format; + hwfc->sw_format = sw_format; + hwfc->width = 
f->width; + hwfc->height = f->height; + + err = av_hwframe_ctx_init(ctx->drm_frames_ref); + if (err) { + av_log(ctx, AV_LOG_ERROR, "AVHWFramesContext init failed: %s!\n", + av_err2str(err)); + goto fail; + } + +attach: + /* Set frame hardware context referencce */ + f->hw_frames_ctx = av_buffer_ref(ctx->drm_frames_ref); + if (!f->hw_frames_ctx) { + err = AVERROR(ENOMEM); + goto fail; + } + + return 0; + +fail: + av_buffer_unref(&ctx->drm_frames_ref); + return err; +} + +static void register_cb(struct capture_context *ctx); + +static void frame_ready(void *data, struct zwlr_export_dmabuf_frame_v1 *frame, + uint32_t tv_sec_hi, uint32_t tv_sec_lo, uint32_t tv_nsec) { + struct capture_context *ctx = data; + AVFrame *f = ctx->current_frame; + AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)f->data[0]; + int err = 0; + + /* Attach the hardware frame context to the frame */ + err = attach_drm_frames_ref(ctx, f, drm_fmt_to_pixfmt(desc->layers[0].format)); + if (err) { + goto end; + } + + AVFrame *mapped_frame = av_frame_alloc(); + if (!mapped_frame) { + err = AVERROR(ENOMEM); + goto end; + } + + AVHWFramesContext *mapped_hwfc; + mapped_hwfc = (AVHWFramesContext *)ctx->mapped_frames_ref->data; + mapped_frame->format = mapped_hwfc->format; + + /* Set frame hardware context referencce */ + mapped_frame->hw_frames_ctx = av_buffer_ref(ctx->mapped_frames_ref); + if (!mapped_frame->hw_frames_ctx) { + err = AVERROR(ENOMEM); + goto end; + } + + err = av_hwframe_map(mapped_frame, f, 0); + if (err) { + av_log(ctx, AV_LOG_ERROR, "Error mapping: %s!\n", av_err2str(err)); + goto end; + } + + AVFrame *enc_input = mapped_frame; + + if (ctx->is_software_encoder) { + AVFrame *soft_frame = av_frame_alloc(); + av_hwframe_transfer_data(soft_frame, mapped_frame, 0); + av_frame_free(&mapped_frame); + enc_input = soft_frame; + } + + /* Nanoseconds */ + enc_input->pts = (((uint64_t)tv_sec_hi) << 32) | tv_sec_lo; + enc_input->pts *= 1000000000; + enc_input->pts += tv_nsec; + + if (!ctx->start_pts) { + ctx->start_pts = enc_input->pts; + } + + enc_input->pts -= ctx->start_pts; + + enc_input->pts = av_rescale_q(enc_input->pts, (AVRational){ 1, 1000000000 }, + ctx->avctx->time_base); + + do { + err = avcodec_send_frame(ctx->avctx, enc_input); + + av_frame_free(&enc_input); + + if (err) { + av_log(ctx, AV_LOG_ERROR, "Error encoding: %s!\n", av_err2str(err)); + goto end; + } + + while (1) { + AVPacket pkt; + av_init_packet(&pkt); + + int ret = avcodec_receive_packet(ctx->avctx, &pkt); + if (ret == AVERROR(EAGAIN)) { + break; + } else if (ret == AVERROR_EOF) { + av_log(ctx, AV_LOG_INFO, "Encoder flushed!\n"); + ctx->quit = 2; + goto end; + } else if (ret) { + av_log(ctx, AV_LOG_ERROR, "Error encoding: %s!\n", + av_err2str(ret)); + err = ret; + goto end; + } + + pkt.stream_index = 0; + err = av_interleaved_write_frame(ctx->avf, &pkt); + + av_packet_unref(&pkt); + + if (err) { + av_log(ctx, AV_LOG_ERROR, "Writing packet fail: %s!\n", + av_err2str(err)); + goto end; + } + }; + } while (ctx->quit); + + av_log(NULL, AV_LOG_INFO, "Encoded frame %i!\n", ctx->avctx->frame_number); + + register_cb(ctx); + +end: + ctx->err = err; + av_frame_free(&ctx->current_frame); +} + +static void frame_cancel(void *data, struct zwlr_export_dmabuf_frame_v1 *frame, + uint32_t reason) { + struct capture_context *ctx = data; + av_log(ctx, AV_LOG_WARNING, "Frame cancelled!\n"); + av_frame_free(&ctx->current_frame); + if (reason != ZWLR_EXPORT_DMABUF_FRAME_V1_CANCEL_REASON_PERNAMENT) + register_cb(ctx); +} + +static const struct 
zwlr_export_dmabuf_frame_v1_listener frame_listener = { + frame_start, + frame_object, + frame_plane, + frame_ready, + frame_cancel, +}; + +static void register_cb(struct capture_context *ctx) +{ + ctx->frame_callback = + zwlr_export_dmabuf_manager_v1_capture_output(ctx->export_manager, 0, + ctx->target_output); + + zwlr_export_dmabuf_frame_v1_add_listener(ctx->frame_callback, + &frame_listener, ctx); +} + +static int init_lavu_hwcontext(struct capture_context *ctx) { + + /* DRM hwcontext */ + ctx->drm_device_ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_DRM); + if (!ctx->drm_device_ref) + return AVERROR(ENOMEM); + + AVHWDeviceContext *ref_data = (AVHWDeviceContext*)ctx->drm_device_ref->data; + AVDRMDeviceContext *hwctx = ref_data->hwctx; + + /* We don't need a device (we don't even know it and can't open it) */ + hwctx->fd = -1; + + av_hwdevice_ctx_init(ctx->drm_device_ref); + + /* Mapped hwcontext */ + int err = av_hwdevice_ctx_create(&ctx->mapped_device_ref, + ctx->hw_device_type, ctx->hardware_device, NULL, 0); + if (err < 0) { + av_log(ctx, AV_LOG_ERROR, "Failed to create a hardware device: %s\n", + av_err2str(err)); + return err; + } + + return 0; +} + +static int set_hwframe_ctx(struct capture_context *ctx, + AVBufferRef *hw_device_ctx) +{ + AVHWFramesContext *frames_ctx = NULL; + int err = 0; + + if (!(ctx->mapped_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx))) { + return AVERROR(ENOMEM); + } + + AVHWFramesConstraints *cst = + av_hwdevice_get_hwframe_constraints(ctx->mapped_device_ref, NULL); + if (!cst) { + av_log(ctx, AV_LOG_ERROR, "Failed to get hw device constraints!\n"); + av_buffer_unref(&ctx->mapped_frames_ref); + return AVERROR(ENOMEM); + } + + frames_ctx = (AVHWFramesContext *)(ctx->mapped_frames_ref->data); + frames_ctx->format = cst->valid_hw_formats[0]; + frames_ctx->sw_format = ctx->avctx->pix_fmt; + frames_ctx->width = ctx->avctx->width; + frames_ctx->height = ctx->avctx->height; + frames_ctx->initial_pool_size = 16; + + av_hwframe_constraints_free(&cst); + + if ((err = av_hwframe_ctx_init(ctx->mapped_frames_ref)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Failed to initialize hw frame context: %s!\n", + av_err2str(err)); + av_buffer_unref(&ctx->mapped_frames_ref); + return err; + } + + if (!ctx->is_software_encoder) { + ctx->avctx->pix_fmt = frames_ctx->format; + ctx->avctx->hw_frames_ctx = av_buffer_ref(ctx->mapped_frames_ref); + if (!ctx->avctx->hw_frames_ctx) { + av_buffer_unref(&ctx->mapped_frames_ref); + err = AVERROR(ENOMEM); + } + } + + return err; +} + +static int init_encoding(struct capture_context *ctx) { + int err; + + /* lavf init */ + err = avformat_alloc_output_context2(&ctx->avf, NULL, + NULL, ctx->out_filename); + if (err) { + av_log(ctx, AV_LOG_ERROR, "Unable to init lavf context!\n"); + return err; + } + + AVStream *st = avformat_new_stream(ctx->avf, NULL); + if (!st) { + av_log(ctx, AV_LOG_ERROR, "Unable to alloc stream!\n"); + return 1; + } + + /* Find encoder */ + AVCodec *out_codec = avcodec_find_encoder_by_name(ctx->encoder_name); + if (!out_codec) { + av_log(ctx, AV_LOG_ERROR, "Codec not found (not compiled in lavc?)!\n"); + return AVERROR(EINVAL); + } + ctx->avf->oformat->video_codec = out_codec->id; + ctx->is_software_encoder = !(out_codec->capabilities & AV_CODEC_CAP_HARDWARE); + + ctx->avctx = avcodec_alloc_context3(out_codec); + if (!ctx->avctx) + return 1; + + ctx->avctx->opaque = ctx; + ctx->avctx->bit_rate = (int)ctx->out_bitrate*1000000.0f; + ctx->avctx->pix_fmt = ctx->software_format; + ctx->avctx->time_base = (AVRational){ 1, 1000 }; + 
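	/* Millisecond encoder time base; frame_ready() rescales the nanosecond
	 * timestamps delivered by the "ready" event into this base. */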
ctx->avctx->compression_level = 7; + ctx->avctx->width = find_output(ctx, ctx->target_output, 0)->width; + ctx->avctx->height = find_output(ctx, ctx->target_output, 0)->height; + + if (ctx->avf->oformat->flags & AVFMT_GLOBALHEADER) + ctx->avctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; + + st->id = 0; + st->time_base = ctx->avctx->time_base; + st->avg_frame_rate = find_output(ctx, ctx->target_output, 0)->framerate; + + /* Init hw frames context */ + err = set_hwframe_ctx(ctx, ctx->mapped_device_ref); + if (err) + return err; + + err = avcodec_open2(ctx->avctx, out_codec, &ctx->encoder_opts); + if (err) { + av_log(ctx, AV_LOG_ERROR, "Cannot open encoder: %s!\n", + av_err2str(err)); + return err; + } + + if (avcodec_parameters_from_context(st->codecpar, ctx->avctx) < 0) { + av_log(ctx, AV_LOG_ERROR, "Couldn't copy codec params: %s!\n", + av_err2str(err)); + return err; + } + + /* Debug print */ + av_dump_format(ctx->avf, 0, ctx->out_filename, 1); + + /* Open for writing */ + err = avio_open(&ctx->avf->pb, ctx->out_filename, AVIO_FLAG_WRITE); + if (err) { + av_log(ctx, AV_LOG_ERROR, "Couldn't open %s: %s!\n", ctx->out_filename, + av_err2str(err)); + return err; + } + + err = avformat_write_header(ctx->avf, NULL); + if (err) { + av_log(ctx, AV_LOG_ERROR, "Couldn't write header: %s!\n", av_err2str(err)); + return err; + } + + return err; +} + +struct capture_context *q_ctx = NULL; + +void on_quit_signal(int signo) { + printf("\r"); + q_ctx->quit = 1; +} + +static int main_loop(struct capture_context *ctx) { + int err; + + q_ctx = ctx; + + if (signal(SIGINT, on_quit_signal) == SIG_ERR) { + av_log(ctx, AV_LOG_ERROR, "Unable to install signal handler!\n"); + return AVERROR(EINVAL); + } + + err = init_lavu_hwcontext(ctx); + if (err) + return err; + + err = init_encoding(ctx); + if (err) + return err; + + /* Start the frame callback */ + register_cb(ctx); + + while (!ctx->err && ctx->quit < 2) { + while (wl_display_prepare_read(ctx->display) != 0) { + wl_display_dispatch_pending(ctx->display); + } + + wl_display_flush(ctx->display); + + struct pollfd fds[1] = { + { .fd = wl_display_get_fd(ctx->display), .events = POLLIN }, + }; + + poll(fds, 1, -1); + + if (!(fds[0].revents & POLLIN)) { + wl_display_cancel_read(ctx->display); + } + + if (fds[0].revents & (POLLERR | POLLHUP | POLLNVAL)) { + av_log(ctx, AV_LOG_ERROR, "Error occurred on the display fd!\n"); + break; + } + + if (fds[0].revents & POLLIN) { + if (wl_display_read_events(ctx->display) < 0) { + av_log(ctx, AV_LOG_ERROR, "Failed to read Wayland events!\n"); + break; + } + wl_display_dispatch_pending(ctx->display); + } + } + + err = av_write_trailer(ctx->avf); + if (err) { + av_log(ctx, AV_LOG_ERROR, "Error writing trailer: %s!\n", + av_err2str(err)); + return err; + } + + av_log(ctx, AV_LOG_INFO, "Wrote trailer!\n"); + + return ctx->err; +} + +static int init(struct capture_context *ctx) { + ctx->display = wl_display_connect(NULL); + if (!ctx->display) { + av_log(ctx, AV_LOG_ERROR, "Failed to connect to display!\n"); + return AVERROR(EINVAL); + } + + wl_list_init(&ctx->output_list); + + ctx->registry = wl_display_get_registry(ctx->display); + wl_registry_add_listener(ctx->registry, ®istry_listener, ctx); + + wl_display_roundtrip(ctx->display); + wl_display_dispatch(ctx->display); + + if (!ctx->export_manager) { + av_log(ctx, AV_LOG_ERROR, "Compositor doesn't support %s!\n", + zwlr_export_dmabuf_manager_v1_interface.name); + return -1; + } + + return 0; +} + +static void print_capturable_surfaces(struct capture_context *ctx) { + + struct 
wayland_output *o, *tmp_o; + wl_list_for_each_reverse_safe(o, tmp_o, &ctx->output_list, link) { + ctx->target_output = o->output; /* Default is first, whatever */ + av_log(ctx, AV_LOG_INFO, "Capturable output: %s Model: %s:\n", + o->make, o->model); + } + + av_log(ctx, AV_LOG_INFO, "Capturing from output: %s!\n", + find_output(ctx, ctx->target_output, 0)->model); +} + +static void uninit(struct capture_context *ctx); + +int main(int argc, char *argv[]) { + int err; + struct capture_context ctx = { 0 }; + ctx.class = &((AVClass) { + .class_name = "dmabuf-capture", + .item_name = av_default_item_name, + .version = LIBAVUTIL_VERSION_INT, + }); + + err = init(&ctx); + if (err) + goto end; + + print_capturable_surfaces(&ctx); + + ctx.hw_device_type = av_hwdevice_find_type_by_name("vaapi"); + ctx.hardware_device = "/dev/dri/renderD128"; + + ctx.encoder_name = "libx264"; + ctx.software_format = av_get_pix_fmt("nv12"); + av_dict_set(&ctx.encoder_opts, "preset", "veryfast", 0); + + ctx.out_filename = "dmabuf_recording_01.mkv"; + ctx.out_bitrate = 29.2f; /* Mbps */ + + err = main_loop(&ctx); + if (err) + goto end; + +end: + uninit(&ctx); + return err; +} + +static void uninit(struct capture_context *ctx) { + struct wayland_output *output, *tmp_o; + wl_list_for_each_safe(output, tmp_o, &ctx->output_list, link) + remove_output(output); + + if (ctx->export_manager) + zwlr_export_dmabuf_manager_v1_destroy(ctx->export_manager); + + av_buffer_unref(&ctx->drm_frames_ref); + av_buffer_unref(&ctx->drm_device_ref); + av_buffer_unref(&ctx->mapped_frames_ref); + av_buffer_unref(&ctx->mapped_device_ref); + + av_dict_free(&ctx->encoder_opts); + + avcodec_close(ctx->avctx); + if (ctx->avf) { + avio_closep(&ctx->avf->pb); + } + avformat_free_context(ctx->avf); +} diff --git a/examples/meson.build b/examples/meson.build index 4725b989..6a0bc46c 100644 --- a/examples/meson.build +++ b/examples/meson.build @@ -1,6 +1,10 @@ threads = dependency('threads') wayland_cursor = dependency('wayland-cursor') +libavutil = dependency('libavutil') +libavcodec = dependency('libavcodec') +libavformat = dependency('libavformat') + executable('simple', 'simple.c', dependencies: wlroots) executable('pointer', 'pointer.c', dependencies: wlroots) executable('touch', 'touch.c', 'cat.c', dependencies: wlroots) @@ -38,3 +42,9 @@ executable( 'input-inhibitor.c', dependencies: [wayland_cursor, wayland_client, wlr_protos, wlroots] ) + +executable( + 'dmabuf-capture', + 'dmabuf-capture.c', + dependencies: [wayland_client, wlr_protos, libavutil, libavcodec, libavformat] +) diff --git a/protocol/meson.build b/protocol/meson.build index a14e9723..ca0d82b5 100644 --- a/protocol/meson.build +++ b/protocol/meson.build @@ -50,6 +50,7 @@ client_protocols = [ [wl_protocol_dir, 'unstable/idle-inhibit/idle-inhibit-unstable-v1.xml'], 'idle.xml', 'screenshooter.xml', + 'wlr-export-dmabuf-unstable-v1.xml', 'wlr-layer-shell-unstable-v1.xml', 'wlr-input-inhibitor-unstable-v1.xml', ] -- cgit v1.2.3 From 9eddcbc376ff92a3a03002b910c31bf96bdba2da Mon Sep 17 00:00:00 2001 From: Rostislav Pehlivanov Date: Sun, 17 Jun 2018 14:06:52 +0100 Subject: Update example and protocol --- examples/dmabuf-capture.c | 28 ++--- protocol/wlr-export-dmabuf-unstable-v1.xml | 180 +++++++++++++++-------------- 2 files changed, 102 insertions(+), 106 deletions(-) (limited to 'protocol') diff --git a/examples/dmabuf-capture.c b/examples/dmabuf-capture.c index cd8a9267..2f7db2f1 100644 --- a/examples/dmabuf-capture.c +++ b/examples/dmabuf-capture.c @@ -163,8 +163,7 @@ static void 
frame_free(void *opaque, uint8_t *data) { static void frame_start(void *data, struct zwlr_export_dmabuf_frame_v1 *frame, uint32_t width, uint32_t height, uint32_t offset_x, uint32_t offset_y, uint32_t buffer_flags, uint32_t flags, uint32_t format, - uint32_t mod_high, uint32_t mod_low, uint32_t num_objects, - uint32_t num_planes) { + uint32_t mod_high, uint32_t mod_low, uint32_t num_objects) { struct capture_context *ctx = data; int err = 0; @@ -180,7 +179,6 @@ static void frame_start(void *data, struct zwlr_export_dmabuf_frame_v1 *frame, desc->nb_layers = 1; desc->layers[0].format = format; - desc->layers[0].nb_planes = num_planes; /* Allocate a frame */ AVFrame *f = av_frame_alloc(); @@ -213,25 +211,18 @@ fail: } static void frame_object(void *data, struct zwlr_export_dmabuf_frame_v1 *frame, - uint32_t index, int32_t fd, uint32_t size) { + uint32_t index, int32_t fd, uint32_t size, uint32_t offset, + uint32_t stride, uint32_t plane_index) { struct capture_context *ctx = data; AVFrame *f = ctx->current_frame; AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)f->data[0]; desc->objects[index].fd = fd; desc->objects[index].size = size; -} - -static void frame_plane(void *data, struct zwlr_export_dmabuf_frame_v1 *frame, - uint32_t index, uint32_t object_index, - uint32_t offset, uint32_t stride) { - struct capture_context *ctx = data; - AVFrame *f = ctx->current_frame; - AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)f->data[0]; - desc->layers[0].planes[index].object_index = object_index; - desc->layers[0].planes[index].offset = offset; - desc->layers[0].planes[index].pitch = stride; + desc->layers[0].planes[plane_index].object_index = index; + desc->layers[0].planes[plane_index].offset = offset; + desc->layers[0].planes[plane_index].pitch = stride; } static const uint32_t pixfmt_to_drm_map[] = { @@ -311,14 +302,18 @@ static void frame_ready(void *data, struct zwlr_export_dmabuf_frame_v1 *frame, struct capture_context *ctx = data; AVFrame *f = ctx->current_frame; AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)f->data[0]; + enum AVPixelFormat pix_fmt = drm_fmt_to_pixfmt(desc->layers[0].format); int err = 0; /* Attach the hardware frame context to the frame */ - err = attach_drm_frames_ref(ctx, f, drm_fmt_to_pixfmt(desc->layers[0].format)); + err = attach_drm_frames_ref(ctx, f, pix_fmt); if (err) { goto end; } + /* TODO: support multiplane stuff */ + desc->layers[0].nb_planes = av_pix_fmt_count_planes(pix_fmt); + AVFrame *mapped_frame = av_frame_alloc(); if (!mapped_frame) { err = AVERROR(ENOMEM); @@ -431,7 +426,6 @@ static void frame_cancel(void *data, struct zwlr_export_dmabuf_frame_v1 *frame, static const struct zwlr_export_dmabuf_frame_v1_listener frame_listener = { .frame = frame_start, .object = frame_object, - .plane = frame_plane, .ready = frame_ready, .cancel = frame_cancel, }; diff --git a/protocol/wlr-export-dmabuf-unstable-v1.xml b/protocol/wlr-export-dmabuf-unstable-v1.xml index 760345a7..751f7efb 100644 --- a/protocol/wlr-export-dmabuf-unstable-v1.xml +++ b/protocol/wlr-export-dmabuf-unstable-v1.xml @@ -1,6 +1,5 @@ - Copyright © 2018 Rostislav Pehlivanov @@ -25,29 +24,56 @@ - An interface to capture surfaces in an efficient way. - Overall usage: - - 1.) client registers with zwlr_screencontent_manager_v1 - 2.) server sends client info about surfaces via "receive_surface_info" - 3.) client subscribes to capture a surface via the "capture" requests - 4.) server sends client events via the "zwlr_screencontent_frame" interface - 5.) 
client finishes and informs server via the "frame_destroy" event - 6.) client optionally resubscribes via repeating steps 3.) through 5.) + An interface to capture surfaces in an efficient way by exporting DMA-BUFs. + + Warning! The protocol described in this file is experimental and + backward incompatible changes may be made. Backward compatible changes + may be added together with the corresponding interface version bump. + Backward incompatible changes are done by bumping the version number in + the protocol and interface names and resetting the interface version. + Once the protocol is to be declared stable, the 'z' prefix and the + version number in the protocol and interface names are removed and the + interface version number is reset. + + + This object is a manager with which to start capturing from sources. + + + + + Capture the next frame of a an entire output. + + + + + + + + + All objects created by the manager will still remain valid, until their + appropriate destroy request has been called. + + + + - - This object represents a frame which is ready to have its resources - fetched and used. - - The receive callback shall be called first, followed by the "object" - callback once per dmabuf object or the "plane" callback, once per dmabuf - plane. The "ready" event is called last to indicate that all the data has - been made available for readout, as well as the time at which presentation - happened at. The ownership of the frame is passed to the client, who's - responsible for destroying it via the "destroy" event once finished and - by calling close() on the file descriptors received. + + This object represents a single DMA-BUF frame. + + If the capture is successful, the compositor will first send a "frame" + event, followed by one or several "object". When the frame is available + for readout, the "ready" event is sent. + + If the capture failed, the "cancel" event is sent. This can happen anytime + before the "ready" event. + + Once either a "ready" or a "cancel" event is received, the client should + destroy the frame. Once an "object" event is received, the client is + responsible for closing the associated file descriptor. + All frames are read-only and may not be written into or altered. @@ -55,25 +81,23 @@ Special flags that should be respected by the client. - - - Main callback supplying the client with information about the frame, - as well as an object to serve as context for destruction. Always called - first before any other events. - - The "transform" argument describes the orientation needed to be applied - to correctly orient the buffer. For example, a buffer rotated by 90 - degrees will have a value of "3" here, corresponding to the need to - apply a 270 degree transpose to correctly present the buffer. + + Main event supplying the client with information about the frame. If the + capture didn't fail, this event is always emitted first before any other + events. + + This event is followed by a number of "object" as specified by the + "num_objects" argument. + summary="frame width in pixels"/> + summary="frame height in pixels"/> - + - - Callback which serves to supply the client with the file descriptors + + Event which serves to supply the client with the file descriptors containing the data for each object. + + After receiving this event, the client must always close the file + descriptor as soon as they're done with it and even if the frame fails. 
@@ -105,31 +131,28 @@ summary="fd of the current object"/> - - - - Callback which supplies the client with plane information for each - plane. - - - + + - Called as soon as the frame is presented, indicating it is available - for reading. + This event is sent as soon as the frame is presented, indicating it is + available for reading. This event includes the time at which + presentation happened at. + The timestamp is expressed as tv_sec_hi, tv_sec_lo, tv_nsec triples, each component being an unsigned 32-bit value. Whole seconds are in tv_sec which is a 64-bit value combined from tv_sec_hi and tv_sec_lo, and the additional fractional part in tv_nsec as nanoseconds. Hence, - for valid timestamps tv_nsec must be in [0, 999999999]. - The seconds part may have an arbitrary offset at start. + for valid timestamps tv_nsec must be in [0, 999999999]. The seconds part + may have an arbitrary offset at start. + + After receiving this event, the client should destroy this object. @@ -143,22 +166,25 @@ Indicates reason for cancelling the frame. - - - + - If the frame is no longer valid after the "frame" event has been called, - this callback will be used to inform the client to scrap the frame. - Source is still valid for as long as the subscription function does not - return NULL. - This may get called if for instance the surface is in the process of - resizing. + If the capture failed or if the frame is no longer valid after the + "frame" event has been emitted, this event will be used to inform the + client to scrap the frame. + + If the failure is temporary, the client may capture again the same + source. If the failure is permanent, any further attempts to capture the + same source will fail again. + + After receiving this event, the client should destroy this object. @@ -166,35 +192,11 @@ - Unreferences the frame, allowing it to be reused. Must be called as soon - as its no longer used. - Can be called at any time by the client after the "frame" event, after - which the compositor will not call any other events unless the client - resubscribes to capture more. The client will still have to close any - FDs it has been given. - - - + Unreferences the frame. This request must be called as soon as its no + longer used. - - - This object is a manager with which to start capturing from sources. - - - - - Request to start capturing from an entire wl_output. - - - - - - - - All objects created by the manager will still remain valid, until their - appropriate destroy request has been called. + It can be called at any time by the client. The client will still have + to close any FDs it has been given. -- cgit v1.2.3