plugins/screencast: Add PipeWire explicit sync support

This change adds explicit sync support to screencasting. If KWin and the
client successfully negotiate to use explicit sync, KWin adds two
additional blocks to each PipeWire buffer to store file descriptors for
the acquire and release syncobjs. In this implementation, these are the
same syncobjs. The timeline points that the client needs to wait on and
signal are set by KWin and shared with the client using a new SPA
metadata type.

The enum describing the type of the PipeWire buffer blocks that contain
syncobj file descriptors and the metadata struct describing the timeline
points are added in PipeWire 1.2.0. So, this change bumps up the
required PipeWire version. However, this change does not break
compatibility with older versions of PipeWire.
wilder/Plasma/6.3
Doğukan Korkmaztürk 2 years ago
parent 520814ca44
commit a7c1555a02
  1. 2
      CMakeLists.txt
  2. 31
      src/plugins/screencast/screencastbuffer.cpp
  3. 4
      src/plugins/screencast/screencastbuffer.h
  4. 151
      src/plugins/screencast/screencaststream.cpp
  5. 2
      src/plugins/screencast/screencaststream.h

@ -399,7 +399,7 @@ if (NOT libdisplayinfo_FOUND)
endif()
add_feature_info(libdisplayinfo libdisplayinfo_FOUND "EDID and DisplayID library: https://gitlab.freedesktop.org/emersion/libdisplay-info")
pkg_check_modules(PipeWire IMPORTED_TARGET libpipewire-0.3>=0.3.65)
pkg_check_modules(PipeWire IMPORTED_TARGET libpipewire-0.3>=1.2.0)
add_feature_info(PipeWire PipeWire_FOUND "Required for Wayland screencasting")
if (KWIN_BUILD_NOTIFICATIONS)

@ -24,10 +24,11 @@ ScreenCastBuffer::~ScreenCastBuffer()
m_buffer->drop();
}
DmaBufScreenCastBuffer::DmaBufScreenCastBuffer(GraphicsBuffer *buffer, std::shared_ptr<GLTexture> &&texture, std::unique_ptr<GLFramebuffer> &&framebuffer)
DmaBufScreenCastBuffer::DmaBufScreenCastBuffer(GraphicsBuffer *buffer, std::shared_ptr<GLTexture> &&texture, std::unique_ptr<GLFramebuffer> &&framebuffer, std::unique_ptr<SyncTimeline> &&synctimeline)
: ScreenCastBuffer(buffer)
, texture(std::move(texture))
, framebuffer(std::move(framebuffer))
, synctimeline(std::move(synctimeline))
{
}
@ -49,7 +50,8 @@ DmaBufScreenCastBuffer *DmaBufScreenCastBuffer::create(pw_buffer *pwBuffer, cons
return nullptr;
}
if (pwBuffer->buffer->n_datas != uint32_t(attrs->planeCount)) {
const void *syncTimelineMeta = spa_buffer_find_meta_data(pwBuffer->buffer, SPA_META_SyncTimeline, sizeof(spa_meta_sync_timeline));
if (pwBuffer->buffer->n_datas != uint32_t(attrs->planeCount + (syncTimelineMeta ? 2 : 0))) {
buffer->drop();
return nullptr;
}
@ -82,7 +84,30 @@ DmaBufScreenCastBuffer *DmaBufScreenCastBuffer::create(pw_buffer *pwBuffer, cons
spaData[i].chunk->flags = SPA_CHUNK_FLAG_NONE;
};
return new DmaBufScreenCastBuffer(buffer, std::move(texture), std::move(framebuffer));
std::unique_ptr<SyncTimeline> synctimeline;
if (syncTimelineMeta) {
synctimeline = std::make_unique<SyncTimeline>(backend->drmDevice()->fileDescriptor());
const FileDescriptor &syncobjfd = synctimeline->fileDescriptor();
if (!syncobjfd.isValid()) {
buffer->drop();
return nullptr;
}
// Signal the first timeline point, so the very first recording can proceed.
synctimeline->signal(0);
spa_data &acquireData = spaData[attrs->planeCount];
acquireData.type = SPA_DATA_SyncObj;
acquireData.flags = SPA_DATA_FLAG_READABLE;
acquireData.fd = syncobjfd.get();
spa_data &releaseData = spaData[attrs->planeCount + 1];
releaseData.type = SPA_DATA_SyncObj;
releaseData.flags = SPA_DATA_FLAG_READABLE;
releaseData.fd = syncobjfd.get();
}
return new DmaBufScreenCastBuffer(buffer, std::move(texture), std::move(framebuffer), std::move(synctimeline));
}
MemFdScreenCastBuffer::MemFdScreenCastBuffer(GraphicsBuffer *buffer, GraphicsBufferView &&view)

@ -7,6 +7,7 @@
#pragma once
#include "core/graphicsbufferview.h"
#include "core/syncobjtimeline.h"
#include <pipewire/pipewire.h>
@ -35,9 +36,10 @@ public:
std::shared_ptr<GLTexture> texture;
std::unique_ptr<GLFramebuffer> framebuffer;
std::unique_ptr<SyncTimeline> synctimeline;
private:
DmaBufScreenCastBuffer(GraphicsBuffer *buffer, std::shared_ptr<GLTexture> &&texture, std::unique_ptr<GLFramebuffer> &&framebuffer);
DmaBufScreenCastBuffer(GraphicsBuffer *buffer, std::shared_ptr<GLTexture> &&texture, std::unique_ptr<GLFramebuffer> &&framebuffer, std::unique_ptr<SyncTimeline> &&synctimeline);
};
class MemFdScreenCastBuffer : public ScreenCastBuffer

@ -14,6 +14,7 @@
#include "cursor.h"
#include "kwinscreencast_logging.h"
#include "main.h"
#include "opengl/eglnativefence.h"
#include "opengl/glplatform.h"
#include "opengl/gltexture.h"
#include "opengl/glutils.h"
@ -31,6 +32,7 @@
#include <libdrm/drm_fourcc.h>
#include <spa/buffer/meta.h>
#include <spa/pod/dynamic.h>
namespace KWin
{
@ -150,44 +152,69 @@ static const int videoDamageRegionCount = 16;
void ScreenCastStream::newStreamParams()
{
qCDebug(KWIN_SCREENCAST) << objectName() << "announcing stream params. with dmabuf:" << m_dmabufParams.has_value();
uint8_t paramsBuffer[1024];
spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(paramsBuffer, sizeof(paramsBuffer));
const int buffertypes = m_dmabufParams ? (1 << SPA_DATA_DmaBuf) : (1 << SPA_DATA_MemFd);
const int bpp = m_videoFormat.format == SPA_VIDEO_FORMAT_RGB || m_videoFormat.format == SPA_VIDEO_FORMAT_BGR ? 3 : 4;
const int stride = SPA_ROUND_UP_N(m_resolution.width() * bpp, 4);
struct spa_pod_dynamic_builder pod_builder;
struct spa_pod_frame f;
spa_pod_builder_push_object(&pod_builder, &f, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers);
spa_pod_builder_add(&pod_builder,
spa_pod_dynamic_builder_init(&pod_builder, nullptr, 0, 1024);
QVarLengthArray<const spa_pod *> params;
// Buffer parameters for explicit sync. It requires two extra blocks to hold acquire and
// release syncobjs.
if (m_dmabufParams) {
spa_pod_builder_push_object(&pod_builder.b, &f, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers);
spa_pod_builder_add(&pod_builder.b,
SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(3, 2, 4),
SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int(buffertypes),
SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(m_dmabufParams->planeCount + 2), 0);
spa_pod_builder_prop(&pod_builder.b, SPA_PARAM_BUFFERS_metaType, SPA_POD_PROP_FLAG_MANDATORY);
spa_pod_builder_int(&pod_builder.b, 1 << SPA_META_SyncTimeline);
params.append((spa_pod *)spa_pod_builder_pop(&pod_builder.b, &f));
}
// Fallback buffer parameters for DmaBuf with implicit sync or MemFd
spa_pod_builder_push_object(&pod_builder.b, &f, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers);
spa_pod_builder_add(&pod_builder.b,
SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(3, 2, 4),
SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int(buffertypes), 0);
if (!m_dmabufParams) {
spa_pod_builder_add(&pod_builder,
spa_pod_builder_add(&pod_builder.b,
SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1),
SPA_PARAM_BUFFERS_size, SPA_POD_Int(stride * m_resolution.height()),
SPA_PARAM_BUFFERS_stride, SPA_POD_Int(stride),
SPA_PARAM_BUFFERS_align, SPA_POD_Int(16), 0);
} else {
spa_pod_builder_add(&pod_builder,
spa_pod_builder_add(&pod_builder.b,
SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(m_dmabufParams->planeCount), 0);
}
spa_pod *bufferPod = (spa_pod *)spa_pod_builder_pop(&pod_builder, &f);
params.append((spa_pod *)spa_pod_builder_pop(&pod_builder.b, &f));
QVarLengthArray<const spa_pod *> params = {
bufferPod,
(spa_pod *)spa_pod_builder_add_object(&pod_builder,
// Metadata parameters
params.append(
(spa_pod *)spa_pod_builder_add_object(&pod_builder.b,
SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Cursor),
SPA_PARAM_META_size, SPA_POD_Int(CURSOR_META_SIZE(m_cursor.bitmapSize.width(), m_cursor.bitmapSize.height()))),
(spa_pod *)spa_pod_builder_add_object(&pod_builder,
SPA_PARAM_META_size, SPA_POD_Int(CURSOR_META_SIZE(m_cursor.bitmapSize.width(), m_cursor.bitmapSize.height()))));
params.append(
(spa_pod *)spa_pod_builder_add_object(&pod_builder.b,
SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoDamage),
SPA_PARAM_META_size, SPA_POD_CHOICE_RANGE_Int(sizeof(struct spa_meta_region) * videoDamageRegionCount, sizeof(struct spa_meta_region) * 1, sizeof(struct spa_meta_region) * videoDamageRegionCount)),
(spa_pod *)spa_pod_builder_add_object(&pod_builder,
SPA_PARAM_META_size, SPA_POD_CHOICE_RANGE_Int(sizeof(struct spa_meta_region) * videoDamageRegionCount, sizeof(struct spa_meta_region) * 1, sizeof(struct spa_meta_region) * videoDamageRegionCount)));
params.append(
(spa_pod *)spa_pod_builder_add_object(&pod_builder.b,
SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header),
SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header))),
};
SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header))));
if (m_dmabufParams) {
params.append(
(spa_pod *)spa_pod_builder_add_object(&pod_builder.b,
SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
SPA_PARAM_META_type, SPA_POD_Id(SPA_META_SyncTimeline),
SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_sync_timeline))));
}
pw_stream_update_params(m_pwStream, params.data(), params.count());
}
@ -286,6 +313,8 @@ void ScreenCastStream::onStreamRemoveBuffer(pw_buffer *pwBuffer)
delete buffer;
pwBuffer->user_data = nullptr;
}
m_dequeuedBuffers.removeOne(pwBuffer);
}
ScreenCastStream::ScreenCastStream(ScreenCastSource *source, std::shared_ptr<PipeWireCore> pwCore, QObject *parent)
@ -490,6 +519,57 @@ void ScreenCastStream::scheduleRecord(const QRegion &damage, Contents contents)
record(damage, contents);
}
pw_buffer *ScreenCastStream::dequeueBuffer()
{
    // Returns a pw_buffer that is safe to render into, or nullptr if none is
    // currently available. With explicit sync, a buffer handed back by the
    // stream may still be in use by the client until it signals the release
    // timeline point, so such buffers are parked in m_dequeuedBuffers until
    // their release point has materialized.
    const auto isBufferUsable = [](pw_buffer *pwBuffer) {
        const spa_buffer *spaBuffer = pwBuffer->buffer;
        const spa_data *spaData = spaBuffer->datas;
        // Buffers that are not dmabuf never use explicit sync.
        if (spaData[0].type != SPA_DATA_DmaBuf) {
            return true;
        }
        auto dmabuf = static_cast<DmaBufScreenCastBuffer *>(pwBuffer->user_data);
        if (dmabuf && dmabuf->synctimeline) {
            spa_meta_sync_timeline *synctmeta =
                static_cast<spa_meta_sync_timeline *>(spa_buffer_find_meta_data(spaBuffer,
                                                                               SPA_META_SyncTimeline,
                                                                               sizeof(spa_meta_sync_timeline)));
            if (!synctmeta) {
                // The SyncTimeline metadata should always be present when a
                // sync timeline was negotiated; treat a missing block as
                // usable instead of dereferencing a null pointer.
                return true;
            }
            return dmabuf->synctimeline->isMaterialized(synctmeta->release_point);
        }
        return true;
    };

    // First, search the list of already dequeued buffers
    auto foundBuffer = std::find_if(m_dequeuedBuffers.begin(), m_dequeuedBuffers.end(), isBufferUsable);
    if (foundBuffer != m_dequeuedBuffers.end()) {
        pw_buffer *pwBuffer = *foundBuffer;
        m_dequeuedBuffers.erase(foundBuffer);
        return pwBuffer;
    }

    // If we do not have a usable dequeued buffer, fetch a new one from the stream
    pw_buffer *pwBuffer = pw_stream_dequeue_buffer(m_pwStream);
    if (!pwBuffer) {
        return nullptr;
    }

    if (!pwBuffer->user_data) {
        qCWarning(KWIN_SCREENCAST) << objectName() << "Received stream buffer that does not contain user data";
        corruptHeader(pwBuffer->buffer);
        pw_stream_queue_buffer(m_pwStream, pwBuffer);
        return nullptr;
    }

    if (!isBufferUsable(pwBuffer)) {
        // Not ready yet — remember it so a later call can reuse it once the
        // client has signaled the release point.
        m_dequeuedBuffers.append(pwBuffer);
        return nullptr;
    }

    return pwBuffer;
}
void ScreenCastStream::record(const QRegion &damage, Contents contents)
{
AbstractEglBackend *backend = qobject_cast<AbstractEglBackend *>(Compositor::self()->backend());
@ -497,7 +577,7 @@ void ScreenCastStream::record(const QRegion &damage, Contents contents)
return;
}
struct pw_buffer *pwBuffer = pw_stream_dequeue_buffer(m_pwStream);
struct pw_buffer *pwBuffer = dequeueBuffer();
if (!pwBuffer) {
return;
}
@ -506,12 +586,6 @@ void ScreenCastStream::record(const QRegion &damage, Contents contents)
struct spa_data *spa_data = spa_buffer->datas;
ScreenCastBuffer *buffer = static_cast<ScreenCastBuffer *>(pwBuffer->user_data);
if (!buffer) {
qCWarning(KWIN_SCREENCAST) << objectName() << "Failed to record frame: invalid buffer type";
corruptHeader(spa_buffer);
pw_stream_queue_buffer(m_pwStream, pwBuffer);
return;
}
Contents effectiveContents = contents;
if (m_cursor.mode != ScreencastV1Interface::Hidden) {
@ -524,10 +598,23 @@ void ScreenCastStream::record(const QRegion &damage, Contents contents)
EglContext *context = backend->openglContext();
context->makeCurrent();
spa_meta_sync_timeline *synctmeta = nullptr;
if (effectiveContents & Content::Video) {
if (auto memfd = dynamic_cast<MemFdScreenCastBuffer *>(buffer)) {
m_source->render(memfd->view.image());
} else if (auto dmabuf = dynamic_cast<DmaBufScreenCastBuffer *>(buffer)) {
if (dmabuf->synctimeline) {
synctmeta = static_cast<spa_meta_sync_timeline *>(spa_buffer_find_meta_data(spa_buffer,
SPA_META_SyncTimeline,
sizeof(spa_meta_sync_timeline)));
FileDescriptor syncFileFd = dmabuf->synctimeline->exportSyncFile(synctmeta->release_point);
EGLNativeFence fence = EGLNativeFence::importFence(backend->eglDisplayObject(), std::move(syncFileFd));
if (fence.waitSync() != EGL_TRUE) {
qCWarning(KWIN_SCREENCAST) << objectName() << "Failed to wait on a fence, recording may be corrupted";
}
}
m_source->render(dmabuf->framebuffer.get());
}
}
@ -547,11 +634,21 @@ void ScreenCastStream::record(const QRegion &damage, Contents contents)
}
}
// Implicit sync is broken on Nvidia and with llvmpipe
if (context->glPlatform()->isNvidia() || context->isSoftwareRenderer()) {
glFinish();
if (synctmeta) {
EGLNativeFence fence(backend->eglDisplayObject());
synctmeta->acquire_point = synctmeta->release_point + 1;
synctmeta->release_point = synctmeta->acquire_point + 1;
auto dmabuf = static_cast<DmaBufScreenCastBuffer *>(buffer);
dmabuf->synctimeline->moveInto(synctmeta->acquire_point, fence.takeFileDescriptor());
} else {
glFlush();
// Implicit sync is broken on Nvidia and with llvmpipe
if (context->glPlatform()->isNvidia() || context->isSoftwareRenderer()) {
glFinish();
} else {
glFlush();
}
}
addDamage(spa_buffer, effectiveDamage);

@ -97,6 +97,7 @@ private:
spa_pod *buildFormat(struct spa_pod_builder *b, enum spa_video_format format, struct spa_rectangle *resolution,
struct spa_fraction *defaultFramerate, struct spa_fraction *minFramerate, struct spa_fraction *maxFramerate,
const QList<uint64_t> &modifiers, quint32 modifiersFlags);
pw_buffer *dequeueBuffer();
void record(const QRegion &damage, Contents contents);
std::optional<ScreenCastDmaBufTextureParams> testCreateDmaBuf(const QSize &size, quint32 format, const QList<uint64_t> &modifiers);
@ -137,6 +138,7 @@ private:
QRegion m_pendingDamage;
QTimer m_pendingFrame;
Contents m_pendingContents = Content::None;
QList<pw_buffer *> m_dequeuedBuffers;
};
} // namespace KWin

Loading…
Cancel
Save