/*
 * Copyright © 2018, VideoLAN and dav1d authors
 * Copyright © 2018, Two Orioles, LLC
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "vcs_version.h"

#include <errno.h>
#include <string.h>

#include "dav1d/dav1d.h"
#include "dav1d/data.h"

#include "common/mem.h"
#include "common/validate.h"

#include "src/internal.h"
#include "src/log.h"
#include "src/obu.h"
#include "src/qm.h"
#include "src/ref.h"
#include "src/thread_task.h"
#include "src/wedge.h"
#include "src/film_grain.h"

static void init_internal(void) {
    dav1d_init_wedge_masks();
    dav1d_init_interintra_masks();
    dav1d_init_qm_tables();
}

const char *dav1d_version(void) {
    return DAV1D_VERSION;
}

const char *dav1d_version_vcs(void) {
    return DAV1D_VERSION_VCS;
}

unsigned int dav1d_version_int(void) {
    return DAV1D_VERSION_INT;
}

void dav1d_default_settings(Dav1dSettings *const s) {
    s->n_frame_threads = 1;
    s->n_tile_threads = 1;
    s->apply_grain = 1;
    s->allocator.cookie = NULL;
    s->allocator.alloc_picture_callback = default_picture_allocator;
    s->allocator.release_picture_callback = default_picture_release;
    s->logger.cookie = NULL;
    s->logger.callback = dav1d_log_default_callback;
    s->operating_point = 0;
    s->all_layers = 1; // just until the tests are adjusted
}
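
/* Usage note (a hedged sketch for illustration, not part of the library):
 * callers are expected to initialize every field with dav1d_default_settings()
 * and then override only what they need before dav1d_open(). The thread
 * counts below are arbitrary example values, not recommendations.
 *
 *     Dav1dSettings s;
 *     Dav1dContext *ctx = NULL;
 *     dav1d_default_settings(&s);
 *     s.n_frame_threads = 4;              // example override
 *     s.n_tile_threads  = 2;              // example override
 *     if (dav1d_open(&ctx, &s) < 0)
 *         return -1;                      // negative errno-style error code
 *     // ... decode ...
 *     dav1d_close(&ctx);
 */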

static void close_internal(Dav1dContext **const c_out, int flush);

int dav1d_open(Dav1dContext **const c_out,
               const Dav1dSettings *const s)
{
    static pthread_once_t initted = PTHREAD_ONCE_INIT;
    pthread_once(&initted, init_internal);

    validate_input_or_ret(c_out != NULL, -EINVAL);
    validate_input_or_ret(s != NULL, -EINVAL);
    validate_input_or_ret(s->n_tile_threads >= 1 &&
                          s->n_tile_threads <= DAV1D_MAX_TILE_THREADS, -EINVAL);
    validate_input_or_ret(s->n_frame_threads >= 1 &&
                          s->n_frame_threads <= DAV1D_MAX_FRAME_THREADS, -EINVAL);
    validate_input_or_ret(s->allocator.alloc_picture_callback != NULL,
                          -EINVAL);
    validate_input_or_ret(s->allocator.release_picture_callback != NULL,
                          -EINVAL);
    validate_input_or_ret(s->operating_point >= 0 &&
                          s->operating_point <= 31, -EINVAL);

    Dav1dContext *const c = *c_out = dav1d_alloc_aligned(sizeof(*c), 32);
    if (!c) goto error;
    memset(c, 0, sizeof(*c));

    c->allocator = s->allocator;
    c->logger = s->logger;
    c->apply_grain = s->apply_grain;
    c->operating_point = s->operating_point;
    c->all_layers = s->all_layers;
    c->frame_thread.flush = &c->frame_thread.flush_mem;
    atomic_init(c->frame_thread.flush, 0);
    c->n_fc = s->n_frame_threads;
    c->fc = dav1d_alloc_aligned(sizeof(*c->fc) * s->n_frame_threads, 32);
    if (!c->fc) goto error;
    memset(c->fc, 0, sizeof(*c->fc) * s->n_frame_threads);
    if (c->n_fc > 1) {
        c->frame_thread.out_delayed =
            malloc(sizeof(*c->frame_thread.out_delayed) * c->n_fc);
        if (!c->frame_thread.out_delayed) goto error;
        memset(c->frame_thread.out_delayed, 0,
               sizeof(*c->frame_thread.out_delayed) * c->n_fc);
    }
    for (int n = 0; n < s->n_frame_threads; n++) {
        Dav1dFrameContext *const f = &c->fc[n];
        f->c = c;
        f->lf.last_sharpness = -1;
        f->n_tc = s->n_tile_threads;
        f->tc = dav1d_alloc_aligned(sizeof(*f->tc) * s->n_tile_threads, 32);
        if (!f->tc) goto error;
        memset(f->tc, 0, sizeof(*f->tc) * s->n_tile_threads);
        if (f->n_tc > 1) {
            if (pthread_mutex_init(&f->tile_thread.lock, NULL)) goto error;
            if (pthread_cond_init(&f->tile_thread.cond, NULL)) {
                pthread_mutex_destroy(&f->tile_thread.lock);
                goto error;
            }
            if (pthread_cond_init(&f->tile_thread.icond, NULL)) {
                pthread_mutex_destroy(&f->tile_thread.lock);
                pthread_cond_destroy(&f->tile_thread.cond);
                goto error;
            }
            f->tile_thread.inited = 1;
        }
        for (int m = 0; m < s->n_tile_threads; m++) {
            Dav1dTileContext *const t = &f->tc[m];
            t->f = f;
            t->cf = dav1d_alloc_aligned(32 * 32 * sizeof(int32_t), 32);
            if (!t->cf) goto error;
            t->scratch.mem = dav1d_alloc_aligned(128 * 128 * 4, 32);
            if (!t->scratch.mem) goto error;
            memset(t->cf, 0, 32 * 32 * sizeof(int32_t));
            t->emu_edge =
                dav1d_alloc_aligned(320 * (256 + 7) * sizeof(uint16_t), 32);
            if (!t->emu_edge) goto error;
            if (f->n_tc > 1) {
                if (pthread_mutex_init(&t->tile_thread.td.lock, NULL)) goto error;
                if (pthread_cond_init(&t->tile_thread.td.cond, NULL)) {
                    pthread_mutex_destroy(&t->tile_thread.td.lock);
                    goto error;
                }
                t->tile_thread.fttd = &f->tile_thread;
                if (pthread_create(&t->tile_thread.td.thread, NULL, dav1d_tile_task, t)) {
                    pthread_cond_destroy(&t->tile_thread.td.cond);
                    pthread_mutex_destroy(&t->tile_thread.td.lock);
                    goto error;
                }
                t->tile_thread.td.inited = 1;
            }
        }
        f->libaom_cm = av1_alloc_ref_mv_common();
        if (!f->libaom_cm) goto error;
        if (c->n_fc > 1) {
            if (pthread_mutex_init(&f->frame_thread.td.lock, NULL)) goto error;
            if (pthread_cond_init(&f->frame_thread.td.cond, NULL)) {
                pthread_mutex_destroy(&f->frame_thread.td.lock);
                goto error;
            }
            if (pthread_create(&f->frame_thread.td.thread, NULL, dav1d_frame_task, f)) {
                pthread_cond_destroy(&f->frame_thread.td.cond);
                pthread_mutex_destroy(&f->frame_thread.td.lock);
                goto error;
            }
            f->frame_thread.td.inited = 1;
        }
    }

    // intra edge tree
    c->intra_edge.root[BL_128X128] = &c->intra_edge.branch_sb128[0].node;
    dav1d_init_mode_tree(c->intra_edge.root[BL_128X128], c->intra_edge.tip_sb128, 1);
    c->intra_edge.root[BL_64X64] = &c->intra_edge.branch_sb64[0].node;
    dav1d_init_mode_tree(c->intra_edge.root[BL_64X64], c->intra_edge.tip_sb64, 0);

    return 0;

error:
    if (c) close_internal(c_out, 0);
    return -ENOMEM;
}

static void dummy_free(const uint8_t *const data, void *const user_data) {
    assert(data && !user_data);
}

int dav1d_parse_sequence_header(Dav1dSequenceHeader *const out,
                                const uint8_t *const ptr, const size_t sz)
{
    Dav1dData buf = { 0 };
    int res;

    validate_input_or_ret(out != NULL, -EINVAL);

    Dav1dSettings s;
    dav1d_default_settings(&s);
    s.logger.callback = NULL;

    Dav1dContext *c;
    res = dav1d_open(&c, &s);
    if (res < 0) return res;

    if (ptr) {
        res = dav1d_data_wrap_internal(&buf, ptr, sz, dummy_free, NULL);
        if (res < 0) goto error;
    }

    while (buf.sz > 0) {
        res = dav1d_parse_obus(c, &buf, 1);
        if (res < 0) goto error;

        assert((size_t)res <= buf.sz);
        buf.sz -= res;
        buf.data += res;
    }

    if (!c->seq_hdr) {
        res = -EINVAL;
        goto error;
    }

    memcpy(out, c->seq_hdr, sizeof(*out));

    res = 0;
error:
    dav1d_data_unref_internal(&buf);
    dav1d_close(&c);

    return res;
}
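
/* A hedged illustration of the probe helper above (not library code): a
 * caller can inspect stream parameters without running a full decode loop.
 * "obu_buf" and "obu_len" are hypothetical caller-provided values holding a
 * buffer of raw OBUs that includes a sequence header.
 *
 *     Dav1dSequenceHeader seq;
 *     int err = dav1d_parse_sequence_header(&seq, obu_buf, obu_len);
 *     if (err == 0) {
 *         // e.g. seq.max_width, seq.max_height, seq.profile are now valid
 *     }
 */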

int dav1d_send_data(Dav1dContext *const c, Dav1dData *const in)
{
    validate_input_or_ret(c != NULL, -EINVAL);
    validate_input_or_ret(in != NULL, -EINVAL);
    validate_input_or_ret(in->data == NULL || in->sz, -EINVAL);

    c->drain = 0;

    if (c->in.data)
        return -EAGAIN;
    dav1d_data_move_ref(&c->in, in);

    return 0;
}

static int output_image(Dav1dContext *const c, Dav1dPicture *const out,
                        Dav1dPicture *const in)
{
    const Dav1dFilmGrainData *fgdata = &in->frame_hdr->film_grain.data;
    int has_grain = fgdata->num_y_points || fgdata->num_uv_points[0] ||
                    fgdata->num_uv_points[1];

    // If there is nothing to be done, skip the allocation/copy
    if (!c->apply_grain || !has_grain) {
        dav1d_picture_move_ref(out, in);
        return 0;
    }

    // Apply film grain to a new copy of the image to avoid corrupting refs
    int res = dav1d_picture_alloc_copy(c, out, in->p.w, in);
    if (res < 0) {
        dav1d_picture_unref_internal(in);
        dav1d_picture_unref_internal(out);
        return res;
    }

    switch (out->p.bpc) {
#if CONFIG_8BPC
    case 8:
        dav1d_apply_grain_8bpc(out, in);
        break;
#endif
#if CONFIG_16BPC
    case 10:
    case 12:
        dav1d_apply_grain_16bpc(out, in);
        break;
#endif
    default:
        assert(0);
    }

    dav1d_picture_unref_internal(in);
    return 0;
}

static int output_picture_ready(Dav1dContext *const c) {

    if (!c->out.data[0]) return 0;

    // skip lower spatial layers
    if (c->operating_point_idc && !c->all_layers) {
        const int max_spatial_id = ulog2(c->operating_point_idc >> 8);
        if (max_spatial_id > c->out.frame_hdr->spatial_id) {
            dav1d_picture_unref_internal(&c->out);
            return 0;
        }
    }

    return 1;
}

static int drain_picture(Dav1dContext *const c, Dav1dPicture *const out) {
    unsigned drain_count = 0;
    do {
        const unsigned next = c->frame_thread.next;
        Dav1dFrameContext *const f = &c->fc[next];
        pthread_mutex_lock(&f->frame_thread.td.lock);
        while (f->n_tile_data > 0)
            pthread_cond_wait(&f->frame_thread.td.cond,
                              &f->frame_thread.td.lock);
        pthread_mutex_unlock(&f->frame_thread.td.lock);
        Dav1dThreadPicture *const out_delayed =
            &c->frame_thread.out_delayed[next];
        if (++c->frame_thread.next == c->n_fc)
            c->frame_thread.next = 0;
        if (out_delayed->p.data[0]) {
            const unsigned progress =
                atomic_load_explicit(&out_delayed->progress[1],
                                     memory_order_relaxed);
            if (out_delayed->visible && progress != FRAME_ERROR)
                dav1d_picture_ref(&c->out, &out_delayed->p);
            dav1d_thread_picture_unref(out_delayed);
            if (output_picture_ready(c))
                return output_image(c, out, &c->out);
        }
    } while (++drain_count < c->n_fc);

    return -EAGAIN;
}

int dav1d_get_picture(Dav1dContext *const c, Dav1dPicture *const out)
{
    int res;

    validate_input_or_ret(c != NULL, -EINVAL);
    validate_input_or_ret(out != NULL, -EINVAL);

    const int drain = c->drain;
    c->drain = 1;

    Dav1dData *const in = &c->in;
    if (!in->data) {
        if (c->n_fc == 1) return -EAGAIN;
        return drain_picture(c, out);
    }

    while (in->sz > 0) {
        res = dav1d_parse_obus(c, in, 0);
        if (res < 0) {
            dav1d_data_unref_internal(in);
        } else {
            assert((size_t)res <= in->sz);
            in->sz -= res;
            in->data += res;
            if (!in->sz) dav1d_data_unref_internal(in);
        }
        if (output_picture_ready(c))
            break;
        if (res < 0)
            return res;
    }

    if (output_picture_ready(c))
        return output_image(c, out, &c->out);

    if (c->n_fc > 1 && drain)
        return drain_picture(c, out);

    return -EAGAIN;
}
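
/* A hedged sketch of the send/get hand-shake implemented above (illustration
 * only, not library code): dav1d_send_data() returns -EAGAIN while the
 * previous input has not been fully consumed, and dav1d_get_picture() returns
 * -EAGAIN while no picture is ready yet, so a caller alternates between the
 * two and retries the same Dav1dData until it is drained. One iteration might
 * look like this, with "ctx", "data" and "pic" owned by the caller:
 *
 *     int res = dav1d_send_data(ctx, &data);
 *     if (res < 0 && res != -EAGAIN)
 *         return res;                     // hard error
 *     res = dav1d_get_picture(ctx, &pic);
 *     if (res == 0)
 *         dav1d_picture_unref(&pic);      // caller releases each output picture
 *     else if (res != -EAGAIN)
 *         return res;                     // hard error
 */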

void dav1d_flush(Dav1dContext *const c) {
    dav1d_data_unref_internal(&c->in);
    c->drain = 0;

    for (int i = 0; i < 8; i++) {
        if (c->refs[i].p.p.data[0])
            dav1d_thread_picture_unref(&c->refs[i].p);
        dav1d_ref_dec(&c->refs[i].segmap);
        dav1d_ref_dec(&c->refs[i].refmvs);
        dav1d_cdf_thread_unref(&c->cdf[i]);
    }
    c->frame_hdr = NULL;
    c->seq_hdr = NULL;
    dav1d_ref_dec(&c->seq_hdr_ref);

    c->mastering_display = NULL;
    c->content_light = NULL;
    dav1d_ref_dec(&c->mastering_display_ref);
    dav1d_ref_dec(&c->content_light_ref);

    if (c->n_fc == 1) return;

    // mark each currently-running frame as flushing, so that we exit
    // as soon as the running thread checks this flag
    atomic_store(c->frame_thread.flush, 1);
    for (unsigned n = 0, next = c->frame_thread.next; n < c->n_fc; n++, next++) {
        if (next == c->n_fc) next = 0;
        Dav1dFrameContext *const f = &c->fc[next];
        pthread_mutex_lock(&f->frame_thread.td.lock);
        if (f->n_tile_data > 0) {
            while (f->n_tile_data > 0)
                pthread_cond_wait(&f->frame_thread.td.cond,
                                  &f->frame_thread.td.lock);
            assert(!f->cur.data[0]);
        }
        pthread_mutex_unlock(&f->frame_thread.td.lock);
        Dav1dThreadPicture *const out_delayed = &c->frame_thread.out_delayed[next];
        if (out_delayed->p.data[0])
            dav1d_thread_picture_unref(out_delayed);
    }
    atomic_store(c->frame_thread.flush, 0);

    c->frame_thread.next = 0;
}

void dav1d_close(Dav1dContext **const c_out) {
    validate_input(c_out != NULL);
    close_internal(c_out, 1);
}

static void close_internal(Dav1dContext **const c_out, int flush) {
    Dav1dContext *const c = *c_out;
    if (!c) return;

    if (flush) dav1d_flush(c);

    for (unsigned n = 0; c->fc && n < c->n_fc; n++) {
        Dav1dFrameContext *const f = &c->fc[n];

        // clean-up threading stuff
        if (c->n_fc > 1 && f->frame_thread.td.inited) {
            pthread_mutex_lock(&f->frame_thread.td.lock);
            f->frame_thread.die = 1;
            pthread_cond_signal(&f->frame_thread.td.cond);
            pthread_mutex_unlock(&f->frame_thread.td.lock);
            pthread_join(f->frame_thread.td.thread, NULL);
            freep(&f->frame_thread.b);
            dav1d_freep_aligned(&f->frame_thread.pal_idx);
            dav1d_freep_aligned(&f->frame_thread.cf);
            freep(&f->frame_thread.tile_start_off);
            dav1d_freep_aligned(&f->frame_thread.pal);
            freep(&f->frame_thread.cbi);
            pthread_mutex_destroy(&f->frame_thread.td.lock);
            pthread_cond_destroy(&f->frame_thread.td.cond);
        }
        if (f->n_tc > 1 && f->tc && f->tile_thread.inited) {
            pthread_mutex_lock(&f->tile_thread.lock);
            for (int m = 0; m < f->n_tc; m++) {
                Dav1dTileContext *const t = &f->tc[m];
                t->tile_thread.die = 1;
                // mark tile threads that were never created as available
                if (!t->tile_thread.td.inited)
                    f->tile_thread.available |= 1ULL<<m;
            }
            pthread_cond_broadcast(&f->tile_thread.cond);
            while (f->tile_thread.available != ~0ULL >> (64 - f->n_tc))
                pthread_cond_wait(&f->tile_thread.icond,
                                  &f->tile_thread.lock);
            pthread_mutex_unlock(&f->tile_thread.lock);
            for (int m = 0; m < f->n_tc; m++) {
                Dav1dTileContext *const t = &f->tc[m];
                if (f->n_tc > 1 && t->tile_thread.td.inited) {
                    pthread_join(t->tile_thread.td.thread, NULL);
                    pthread_mutex_destroy(&t->tile_thread.td.lock);
                    pthread_cond_destroy(&t->tile_thread.td.cond);
                }
            }
            pthread_mutex_destroy(&f->tile_thread.lock);
            pthread_cond_destroy(&f->tile_thread.cond);
            pthread_cond_destroy(&f->tile_thread.icond);
            freep(&f->tile_thread.task_idx_to_sby_and_tile_idx);
        }
        for (int m = 0; f->tc && m < f->n_tc; m++) {
            Dav1dTileContext *const t = &f->tc[m];
            dav1d_free_aligned(t->cf);
            dav1d_free_aligned(t->scratch.mem);
            dav1d_free_aligned(t->emu_edge);
        }
        for (int m = 0; f->ts && m < f->n_ts; m++) {
            Dav1dTileState *const ts = &f->ts[m];
            pthread_cond_destroy(&ts->tile_thread.cond);
            pthread_mutex_destroy(&ts->tile_thread.lock);
        }
        free(f->ts);
        dav1d_free_aligned(f->tc);
        dav1d_free_aligned(f->ipred_edge[0]);
        free(f->a);
        free(f->tile);
        free(f->lf.mask);
        free(f->lf.lr_mask);
        free(f->lf.level);
        free(f->lf.tx_lpf_right_edge[0]);
        if (f->libaom_cm) av1_free_ref_mv_common(f->libaom_cm);
        dav1d_free_aligned(f->lf.cdef_line);
        dav1d_free_aligned(f->lf.lr_lpf_line);
    }
    dav1d_free_aligned(c->fc);
    dav1d_data_unref_internal(&c->in);
    if (c->n_fc > 1 && c->frame_thread.out_delayed) {
        for (unsigned n = 0; n < c->n_fc; n++)
            if (c->frame_thread.out_delayed[n].p.data[0])
                dav1d_thread_picture_unref(&c->frame_thread.out_delayed[n]);
        free(c->frame_thread.out_delayed);
    }
    for (int n = 0; n < c->n_tile_data; n++)
        dav1d_data_unref_internal(&c->tile[n].data);
    free(c->tile);
    for (int n = 0; n < 8; n++) {
        dav1d_cdf_thread_unref(&c->cdf[n]);
        if (c->refs[n].p.p.data[0])
            dav1d_thread_picture_unref(&c->refs[n].p);
        dav1d_ref_dec(&c->refs[n].refmvs);
        dav1d_ref_dec(&c->refs[n].segmap);
    }
    dav1d_ref_dec(&c->seq_hdr_ref);
    dav1d_ref_dec(&c->frame_hdr_ref);

    dav1d_ref_dec(&c->mastering_display_ref);
    dav1d_ref_dec(&c->content_light_ref);

    dav1d_freep_aligned(c_out);
}

void dav1d_picture_unref(Dav1dPicture *const p) {
    dav1d_picture_unref_internal(p);
}

uint8_t *dav1d_data_create(Dav1dData *const buf, const size_t sz) {
    return dav1d_data_create_internal(buf, sz);
}

int dav1d_data_wrap(Dav1dData *const buf, const uint8_t *const ptr,
                    const size_t sz,
                    void (*const free_callback)(const uint8_t *data,
                                                void *user_data),
                    void *const user_data)
{
    return dav1d_data_wrap_internal(buf, ptr, sz, free_callback, user_data);
}

int dav1d_data_wrap_user_data(Dav1dData *const buf,
                              const uint8_t *const user_data,
                              void (*const free_callback)(const uint8_t *user_data,
                                                          void *cookie),
                              void *const cookie)
{
    return dav1d_data_wrap_user_data_internal(buf,
                                              user_data,
                                              free_callback,
                                              cookie);
}

void dav1d_data_unref(Dav1dData *const buf) {
    dav1d_data_unref_internal(buf);
}