gpu: actually generalize `pl_tex_export`

We accidentally implemented this inside vulkan/gpu.c instead of the
general purpose wrapper code. That also meant we never tested the
`tex->params.export_handle` condition.

However, I realized that this condition is actually too restrictive for
our test framework, and working around it there would be sort of
annoying. So just drop the restriction.

I won't bother updating the API version for this change, since the
actual behavior hasn't changed. (And even if it had, it would only
matter for our own test framework)

As an aside, fix a bunch of related comments that still had outdated
field names in the documentation.
parent 70946fc9
Pipeline #3829 passed with stages
in 2 minutes and 4 seconds
......@@ -1387,3 +1387,9 @@ void pl_sync_destroy(const struct pl_gpu *gpu,
gpu->impl->sync_destroy(gpu, *sync);
*sync = NULL;
}
bool pl_tex_export(const struct pl_gpu *gpu, const struct pl_tex *tex,
const struct pl_sync *sync)
{
return gpu->impl->tex_export(gpu, tex, sync);
}
......@@ -39,12 +39,13 @@ struct pl_gpu_fns {
GPU_PFN(buf_create);
GPU_PFN(buf_write);
GPU_PFN(buf_read);
GPU_PFN(buf_export); // optional if !gpu->handle_caps.shared_mem
GPU_PFN(buf_export); // optional if !gpu->export_caps.buf
GPU_PFN(buf_poll); // optional: if NULL buffers are always free to use
GPU_PFN(desc_namespace);
GPU_PFN(pass_create);
GPU_PFN(pass_run);
GPU_PFN(sync_create); // optional if !gpu->handle_caps.sync
GPU_PFN(sync_create); // optional if !gpu->export_caps.sync
GPU_PFN(tex_export); // optional if !gpu->export_caps.sync
GPU_PFN(gpu_flush); // optional
GPU_PFN(gpu_finish);
};
......
......@@ -461,7 +461,7 @@ struct pl_buf_params {
// Setting this indicates that the memory backing this buffer should be
// shared with external APIs. If so, this must be exactly *one* of
// `pl_gpu.handle_caps.buf`.
// `pl_gpu.export_caps.buf`.
enum pl_handle_type handle_type;
// If non-NULL, the buffer will be created with these contents. Otherwise,
......@@ -965,7 +965,7 @@ struct pl_sync {
// Create a synchronization object. Returns NULL on failure.
//
// `handle_type` must be exactly *one* of `pl_gpu.handle_caps.sync`, and
// `handle_type` must be exactly *one* of `pl_gpu.export_caps.sync`, and
// indicates which type of handle to generate for sharing this sync object.
const struct pl_sync *pl_sync_create(const struct pl_gpu *gpu,
enum pl_handle_type handle_type);
......@@ -981,11 +981,10 @@ void pl_sync_destroy(const struct pl_gpu *gpu,
const struct pl_sync **sync);
// Initiates a texture export operation, allowing a texture to be accessed by
// an external API. This is only valid for textures with `params.handle_type`.
// Returns whether successful. After this operation successfully returns, it is
// guaranteed that `sync->wait_handle` will eventually be signalled. For APIs
// where this is relevant, the image layout should be specified as "general",
// e.g. `GL_LAYOUT_GENERAL_EXT` for OpenGL.
// an external API. Returns whether successful. After this operation
// successfully returns, it is guaranteed that `sync->wait_handle` will
// eventually be signalled. For APIs where this is relevant, the image layout
// should be specified as "general", e.g. `GL_LAYOUT_GENERAL_EXT` for OpenGL.
//
// There is no corresponding "import" operation - the next operation that uses
// a texture will implicitly import the texture. Valid API usage requires that
......
......@@ -2774,8 +2774,8 @@ void pl_vk_sync_unwrap(const struct pl_sync *sync, VkSemaphore *out_wait,
*out_signal = sync_vk->signal;
}
bool pl_tex_export(const struct pl_gpu *gpu, const struct pl_tex *tex,
const struct pl_sync *sync)
static bool vk_tex_export(const struct pl_gpu *gpu, const struct pl_tex *tex,
const struct pl_sync *sync)
{
struct pl_vk *p = gpu->priv;
struct vk_ctx *vk = pl_vk_get(gpu);
......@@ -2848,6 +2848,7 @@ static struct pl_gpu_fns pl_fns_vk = {
.pass_run = vk_pass_run,
.sync_create = vk_sync_create,
.sync_destroy = vk_sync_deref,
.tex_export = vk_tex_export,
.gpu_flush = vk_gpu_flush,
.gpu_finish = vk_gpu_finish,
};
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment