/*
 * This file is part of libplacebo.
 *
 * libplacebo is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * libplacebo is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with libplacebo.  If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

#include "common.h"

#define GPU_PFN(name) __typeof__(pl_##name) *name
struct pl_gpu_fns {
    // Destructors: These also free the corresponding objects, but they
    // must not be called on NULL. (The NULL checks are done by the pl_*_destroy
    // wrappers)
    void (*destroy)(const struct pl_gpu *gpu);
    void (*tex_destroy)(const struct pl_gpu *, const struct pl_tex *);
    void (*buf_destroy)(const struct pl_gpu *, const struct pl_buf *);
    void (*pass_destroy)(const struct pl_gpu *, const struct pl_pass *);
31
    void (*sync_destroy)(const struct pl_gpu *, const struct pl_sync *);
32 33

    GPU_PFN(tex_create);
34
    GPU_PFN(tex_invalidate); // optional
35 36 37 38 39 40 41
    GPU_PFN(tex_clear);
    GPU_PFN(tex_blit);
    GPU_PFN(tex_upload);
    GPU_PFN(tex_download);
    GPU_PFN(buf_create);
    GPU_PFN(buf_write);
    GPU_PFN(buf_read);
42
    GPU_PFN(buf_export); // optional if !gpu->handle_caps.shared_mem
43 44 45 46
    GPU_PFN(buf_poll); // optional: if NULL buffers are always free to use
    GPU_PFN(desc_namespace);
    GPU_PFN(pass_create);
    GPU_PFN(pass_run);
47
    GPU_PFN(sync_create); // optional if !gpu->handle_caps.sync
48
    GPU_PFN(gpu_flush); // optional
Niklas Haas's avatar
Niklas Haas committed
49
    GPU_PFN(gpu_finish);
50 51 52 53 54 55 56
};
#undef GPU_PFN

// Destroy the GPU and free all associated state. All resources such as
// textures and buffers allocated from the GPU must be destroyed before
// calling pl_gpu_destroy.
void pl_gpu_destroy(const struct pl_gpu *gpu);

// Returns true if the device supports interop. This is considered to be
// the case if at least one of `gpu->handle_caps` is nonzero.
static inline bool pl_gpu_supports_interop(const struct pl_gpu *gpu)
{
    return gpu->handle_caps.shared_mem || gpu->handle_caps.sync;
}

// GPU-internal helpers: these should not be used outside of GPU implementations

// Log some metadata about the created GPU, at log level `lev`
void pl_gpu_print_info(const struct pl_gpu *gpu, enum pl_log_level lev);

// Sort the pl_format list into an optimal order. This tries to prefer formats
// supporting more capabilities, while also trying to maintain a sane order
// in terms of bit depth / component index. (Takes a non-const `gpu` because
// it reorders the format list in place.)
void pl_gpu_sort_formats(struct pl_gpu *gpu);

// Pretty-print the format list, at log level `lev`
void pl_gpu_print_formats(const struct pl_gpu *gpu, enum pl_log_level lev);

// Look up the right GLSL image format qualifier from a partially filled-in
// pl_fmt, or NULL if the format does not have a legal matching GLSL name.
//
// Warning: If `fmt->emulated` is true, this function makes the hard assumption
// that 3-channel formats are being emulated as equivalent 4-channel formats!
const char *pl_fmt_glsl_format(const struct pl_fmt *fmt);

// Compute the total size (in bytes) of a texture transfer operation
size_t pl_tex_transfer_size(const struct pl_tex_transfer_params *par);

// A hard-coded upper limit on a pl_buf_pool's size, to prevent OOM loops
#define PL_BUF_POOL_MAX_BUFFERS 8

// A pool of buffers, which can grow as needed
struct pl_buf_pool {
    struct pl_buf_params current_params; // params the pooled buffers were created with
    const struct pl_buf **buffers;       // dynamically sized array of pooled buffers
    int num_buffers;                     // number of entries in `buffers`
    int index;                           // next buffer to try — presumably cycles; confirm in pl_buf_pool_get
};

// Uninitialize a pl_buf_pool, destroying all of the buffers it holds.
void pl_buf_pool_uninit(const struct pl_gpu *gpu, struct pl_buf_pool *pool);

// Get a buffer from the pool matching `params`, growing or recreating the
// pool's buffers as needed. NOTE(review): presumably returns NULL on failure
// — confirm against the implementation.
// Note: params->initial_data is *not* supported
const struct pl_buf *pl_buf_pool_get(const struct pl_gpu *gpu,
                                     struct pl_buf_pool *pool,
                                     const struct pl_buf_params *params);

// Helper that wraps pl_tex_upload/download using texture upload buffers to
// ensure that params->buf is always set. Returns the result of the wrapped
// transfer call.
bool pl_tex_upload_pbo(const struct pl_gpu *gpu, struct pl_buf_pool *pbo,
                       const struct pl_tex_transfer_params *params);
bool pl_tex_download_pbo(const struct pl_gpu *gpu, struct pl_buf_pool *pbo,
                         const struct pl_tex_transfer_params *params);

// This requires that params.buf has been set and is of type PL_BUF_TEXEL_*
// NOTE(review): judging by the `pl_dispatch` parameter these appear to
// perform the transfer by dispatching a shader pass rather than a native
// transfer — confirm in the implementation before relying on this.
bool pl_tex_upload_texel(const struct pl_gpu *gpu, struct pl_dispatch *dp,
                         const struct pl_tex_transfer_params *params);
bool pl_tex_download_texel(const struct pl_gpu *gpu, struct pl_dispatch *dp,
                           const struct pl_tex_transfer_params *params);

// Make a deep-copy of the pass params. Note: cached_program etc. are not
// copied, but cleared explicitly.
struct pl_pass_params pl_pass_params_copy(void *tactx,
                                          const struct pl_pass_params *params);