Commit 99239678 authored by Henrik Gramner

Correctly use dav1d_ prefix for global symbols

parent aa0fc471
......@@ -4055,26 +4055,26 @@ static CdfThreadContext cdf_init[4] = {
[3] = { .cdf = NULL },
};
void av1_init_states(CdfThreadContext *const cdf, const int qidx) {
void dav1d_init_states(CdfThreadContext *const cdf, const int qidx) {
const int qcat = get_qcat_idx(qidx);
if (cdf_init[qcat].cdf) {
cdf_thread_ref(cdf, &cdf_init[qcat]);
dav1d_cdf_thread_ref(cdf, &cdf_init[qcat]);
return;
}
cdf_thread_alloc(&cdf_init[qcat], NULL);
dav1d_cdf_thread_alloc(&cdf_init[qcat], NULL);
cdf_init[qcat].cdf->m = av1_default_cdf;
memcpy(cdf_init[qcat].cdf->kfym, default_kf_y_mode_cdf,
sizeof(default_kf_y_mode_cdf));
cdf_init[qcat].cdf->coef = av1_default_coef_cdf[qcat];
cdf_init[qcat].cdf->mv = default_mv_cdf;
cdf_init[qcat].cdf->dmv = default_mv_cdf;
cdf_thread_ref(cdf, &cdf_init[qcat]);
dav1d_cdf_thread_ref(cdf, &cdf_init[qcat]);
}
void av1_update_tile_cdf(const Av1FrameHeader *const hdr,
CdfContext *const dst,
const CdfContext *const src)
void dav1d_update_tile_cdf(const Av1FrameHeader *const hdr,
CdfContext *const dst,
const CdfContext *const src)
{
int i, j, k, l;
......@@ -4210,7 +4210,9 @@ void av1_update_tile_cdf(const Av1FrameHeader *const hdr,
/*
* CDF threading wrappers.
*/
void cdf_thread_alloc(CdfThreadContext *const cdf, struct thread_data *const t) {
void dav1d_cdf_thread_alloc(CdfThreadContext *const cdf,
struct thread_data *const t)
{
cdf->ref = dav1d_ref_create(sizeof(CdfContext) +
(t != NULL) * sizeof(atomic_uint));
cdf->cdf = cdf->ref->data;
......@@ -4221,17 +4223,19 @@ void cdf_thread_alloc(CdfThreadContext *const cdf, struct thread_data *const t)
}
}
void cdf_thread_ref(CdfThreadContext *const dst, CdfThreadContext *const src) {
void dav1d_cdf_thread_ref(CdfThreadContext *const dst,
CdfThreadContext *const src)
{
dav1d_ref_inc(src->ref);
*dst = *src;
}
void cdf_thread_unref(CdfThreadContext *const cdf) {
void dav1d_cdf_thread_unref(CdfThreadContext *const cdf) {
dav1d_ref_dec(cdf->ref);
memset(cdf, 0, sizeof(*cdf));
}
void cdf_thread_wait(CdfThreadContext *const cdf) {
void dav1d_cdf_thread_wait(CdfThreadContext *const cdf) {
if (!cdf->t) return;
if (atomic_load(cdf->progress)) return;
......@@ -4241,7 +4245,7 @@ void cdf_thread_wait(CdfThreadContext *const cdf) {
pthread_mutex_unlock(&cdf->t->lock);
}
void cdf_thread_signal(CdfThreadContext *const cdf) {
void dav1d_cdf_thread_signal(CdfThreadContext *const cdf) {
if (!cdf->t) return;
pthread_mutex_lock(&cdf->t->lock);
......
......@@ -131,18 +131,18 @@ typedef struct CdfThreadContext {
atomic_uint *progress;
} CdfThreadContext;
void av1_init_states(CdfThreadContext *cdf, int qidx);
void av1_update_tile_cdf(const Av1FrameHeader *hdr, CdfContext *dst,
void dav1d_init_states(CdfThreadContext *cdf, int qidx);
void dav1d_update_tile_cdf(const Av1FrameHeader *hdr, CdfContext *dst,
const CdfContext *src);
void cdf_thread_alloc(CdfThreadContext *cdf, struct thread_data *t);
void cdf_thread_ref(CdfThreadContext *dst, CdfThreadContext *src);
void cdf_thread_unref(CdfThreadContext *cdf);
void dav1d_cdf_thread_alloc(CdfThreadContext *cdf, struct thread_data *t);
void dav1d_cdf_thread_ref(CdfThreadContext *dst, CdfThreadContext *src);
void dav1d_cdf_thread_unref(CdfThreadContext *cdf);
/*
* These are binary signals (so a signal is either "done" or "not done").
*/
void cdf_thread_wait(CdfThreadContext *cdf);
void cdf_thread_signal(CdfThreadContext *cdf);
void dav1d_cdf_thread_wait(CdfThreadContext *cdf);
void dav1d_cdf_thread_signal(CdfThreadContext *cdf);
#endif /* __AV1_CDF_H__ */
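
As the comment above notes, dav1d_cdf_thread_wait() and dav1d_cdf_thread_signal() form a binary done/not-done handshake: wait() returns immediately when no thread_data is attached or the progress flag is already set, and otherwise blocks on the shared mutex until signal() flips the flag. A stand-alone sketch of that pattern follows; the struct layout, the condition-variable use and all sketch_* names are illustrative stand-ins, not dav1d's internals.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for the thread_data / CdfThreadContext fields. */
typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t cond;
} thread_data_sketch;

typedef struct {
    thread_data_sketch *t;   /* NULL when frame threading is disabled */
    atomic_uint progress;    /* 0 = "not done", 1 = "done" */
} signal_sketch;

/* Consumer side: mirrors the shape of dav1d_cdf_thread_wait(). */
static void sketch_wait(signal_sketch *s) {
    if (!s->t) return;                       /* single-threaded: nothing to wait for */
    if (atomic_load(&s->progress)) return;   /* fast path: already signalled */
    pthread_mutex_lock(&s->t->lock);
    while (!atomic_load(&s->progress))
        pthread_cond_wait(&s->t->cond, &s->t->lock);
    pthread_mutex_unlock(&s->t->lock);
}

/* Producer side: mirrors the shape of dav1d_cdf_thread_signal(). */
static void sketch_signal(signal_sketch *s) {
    if (!s->t) return;
    pthread_mutex_lock(&s->t->lock);
    atomic_store(&s->progress, 1);           /* binary: either done or not done */
    pthread_cond_broadcast(&s->t->cond);
    pthread_mutex_unlock(&s->t->lock);
}

static void *producer(void *arg) {
    sketch_signal(arg);
    return NULL;
}

int main(void) {
    static thread_data_sketch td = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
    static signal_sketch s = { &td, 0 };
    pthread_t th;
    pthread_create(&th, NULL, producer, &s);
    sketch_wait(&s);                         /* returns once the producer has signalled */
    pthread_join(th, NULL);
    printf("progress = %u\n", atomic_load(&s.progress));
    return 0;
}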
......@@ -30,6 +30,6 @@
#include "src/internal.h"
int submit_frame(Dav1dContext *c);
int dav1d_submit_frame(Dav1dContext *c);
#endif /* __DAV1D_SRC_DECODE_H__ */
......@@ -132,7 +132,7 @@ static inline enum TxfmTypeSet get_ext_txtp_set(const enum RectTxfmSize tx,
return TXTP_SET_LOSSLESS;
}
const TxfmInfo *const t_dim = &av1_txfm_dimensions[tx];
const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[tx];
if (t_dim->max >= TX_64X64)
return TXTP_SET_DCT;
......@@ -161,9 +161,9 @@ static inline enum TxfmType get_uv_intra_txtp(const enum IntraPredMode uv_mode,
return WHT_WHT;
}
const TxfmInfo *const t_dim = &av1_txfm_dimensions[tx];
const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[tx];
return t_dim->max == TX_32X32 ? DCT_DCT : av1_txtp_from_uvmode[uv_mode];
return t_dim->max == TX_32X32 ? DCT_DCT : dav1d_txtp_from_uvmode[uv_mode];
}
static inline enum TxfmType get_uv_inter_txtp(const TxfmInfo *const uvt_dim,
......@@ -531,7 +531,7 @@ static inline int get_coef_skip_ctx(const TxfmInfo *const t_dim,
const int chroma,
const enum Dav1dPixelLayout layout)
{
const uint8_t *const b_dim = av1_block_dimensions[bs];
const uint8_t *const b_dim = dav1d_block_dimensions[bs];
if (chroma) {
const int ss_ver = layout == DAV1D_PIXEL_LAYOUT_I420;
......@@ -610,7 +610,7 @@ static inline int get_coef_nz_ctx(uint8_t *const levels, const int scan_idx,
const enum RectTxfmSize tx,
const enum TxClass tx_class)
{
const TxfmInfo *const t_dim = &av1_txfm_dimensions[tx];
const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[tx];
if (is_eob) {
if (scan_idx == 0) return 0;
......@@ -640,7 +640,7 @@ static inline int get_coef_nz_ctx(uint8_t *const levels, const int scan_idx,
const int ctx = imin((mag + 1) >> 1, 4);
if (tx_class == TX_CLASS_2D) {
return !rc ? 0 :
av1_nz_map_ctx_offset[tx][imin(y, 4)][imin(x, 4)] + ctx;
dav1d_nz_map_ctx_offset[tx][imin(y, 4)][imin(x, 4)] + ctx;
} else {
return 26 + imin((tx_class == TX_CLASS_V) ? y : x, 2) * 5 + ctx;
}
......@@ -685,7 +685,7 @@ static inline int get_br_ctx(const uint8_t *const levels,
const int rc, const enum RectTxfmSize tx,
const enum TxClass tx_class)
{
const TxfmInfo *const t_dim = &av1_txfm_dimensions[tx];
const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[tx];
const int x = rc >> (imin(t_dim->lh, 3) + 2);
const int y = rc & (4 * imin(t_dim->h, 8) - 1);
const int stride = 4 * (imin(t_dim->h, 8) + 1);
......
......@@ -33,8 +33,8 @@
#include "src/getbits.h"
void init_get_bits(GetBits *const c,
const uint8_t *const data, const size_t sz)
void dav1d_init_get_bits(GetBits *const c, const uint8_t *const data,
const size_t sz)
{
c->ptr = c->ptr_start = data;
c->ptr_end = &c->ptr_start[sz];
......@@ -60,7 +60,7 @@ static void refill(GetBits *const c, const unsigned n) {
c->state |= state << (64 - c->bits_left);
}
unsigned get_bits(GetBits *const c, const unsigned n) {
unsigned dav1d_get_bits(GetBits *const c, const unsigned n) {
assert(n <= 32 /* can go up to 57 if we change return type */);
if (n > c->bits_left) refill(c, n);
......@@ -72,27 +72,27 @@ unsigned get_bits(GetBits *const c, const unsigned n) {
return state >> (64 - n);
}
int get_sbits(GetBits *const c, const unsigned n) {
int dav1d_get_sbits(GetBits *const c, const unsigned n) {
const int shift = 31 - n;
const int res = get_bits(c, n + 1) << shift;
const int res = dav1d_get_bits(c, n + 1) << shift;
return res >> shift;
}
unsigned get_uniform(GetBits *const c, const unsigned n) {
unsigned dav1d_get_uniform(GetBits *const c, const unsigned n) {
assert(n > 0);
const int l = ulog2(n) + 1;
assert(l > 0);
const int m = (1 << l) - n;
const int v = get_bits(c, l - 1);
return v < m ? v : (v << 1) - m + get_bits(c, 1);
const int v = dav1d_get_bits(c, l - 1);
return v < m ? v : (v << 1) - m + dav1d_get_bits(c, 1);
}
unsigned get_vlc(GetBits *const c) {
unsigned dav1d_get_vlc(GetBits *const c) {
int n_bits = 0;
while (!get_bits(c, 1))
while (!dav1d_get_bits(c, 1))
if (++n_bits == 32)
return 0xFFFFFFFFU;
return ((1 << n_bits) - 1) + get_bits(c, n_bits);
return ((1 << n_bits) - 1) + dav1d_get_bits(c, n_bits);
}
static unsigned get_bits_subexp_u(GetBits *const c, const unsigned ref,
......@@ -104,12 +104,12 @@ static unsigned get_bits_subexp_u(GetBits *const c, const unsigned ref,
const int b = i ? 3 + i - 1 : 3;
if (n < v + 3 * (1 << b)) {
v += get_uniform(c, n - v + 1);
v += dav1d_get_uniform(c, n - v + 1);
break;
}
if (!get_bits(c, 1)) {
v += get_bits(c, b);
if (!dav1d_get_bits(c, 1)) {
v += dav1d_get_bits(c, b);
break;
}
......@@ -119,11 +119,11 @@ static unsigned get_bits_subexp_u(GetBits *const c, const unsigned ref,
return ref * 2 <= n ? inv_recenter(ref, v) : n - inv_recenter(n - ref, v);
}
int get_bits_subexp(GetBits *const c, const int ref, const unsigned n) {
int dav1d_get_bits_subexp(GetBits *const c, const int ref, const unsigned n) {
return (int) get_bits_subexp_u(c, ref + (1 << n), 2 << n) - (1 << n);
}
const uint8_t *flush_get_bits(GetBits *c) {
const uint8_t *dav1d_flush_get_bits(GetBits *c) {
c->bits_left = 0;
c->state = 0;
return c->ptr;
......
......@@ -38,12 +38,12 @@ typedef struct GetBits {
const uint8_t *ptr, *ptr_start, *ptr_end;
} GetBits;
void init_get_bits(GetBits *c, const uint8_t *data, size_t sz);
unsigned get_bits(GetBits *c, unsigned n);
int get_sbits(GetBits *c, unsigned n);
unsigned get_uniform(GetBits *c, unsigned range);
unsigned get_vlc(GetBits *c);
int get_bits_subexp(GetBits *c, int ref, unsigned n);
const uint8_t *flush_get_bits(GetBits *c);
void dav1d_init_get_bits(GetBits *c, const uint8_t *data, size_t sz);
unsigned dav1d_get_bits(GetBits *c, unsigned n);
int dav1d_get_sbits(GetBits *c, unsigned n);
unsigned dav1d_get_uniform(GetBits *c, unsigned range);
unsigned dav1d_get_vlc(GetBits *c);
int dav1d_get_bits_subexp(GetBits *c, int ref, unsigned n);
const uint8_t *dav1d_flush_get_bits(GetBits *c);
#endif /* __DAV1D_SRC_GETBITS_H__ */
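
dav1d_get_sbits() keeps the existing sign-extension idiom: it reads n + 1 bits MSB-first and lets an arithmetic right shift propagate the first bit read as the sign. A stand-alone sketch with a toy bit reader follows; the toy_* names and buffer handling are illustrative, not dav1d's GetBits internals.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy MSB-first bit reader over a byte buffer (illustrative only). */
typedef struct {
    const uint8_t *buf;
    size_t bitpos;
} toy_bits;

static unsigned toy_get_bits(toy_bits *c, unsigned n) {
    unsigned v = 0;
    while (n--) {
        v = (v << 1) | ((c->buf[c->bitpos >> 3] >> (7 - (c->bitpos & 7))) & 1);
        c->bitpos++;
    }
    return v;
}

/* Same idiom as dav1d_get_sbits(): read n + 1 bits, place them at the top of
 * a 32-bit int, then arithmetic-shift back down so the first bit read acts as
 * the sign bit. */
static int toy_get_sbits(toy_bits *c, unsigned n) {
    const int shift = 31 - n;
    const int res = toy_get_bits(c, n + 1) << shift;
    return res >> shift;
}

int main(void) {
    /* 0xD0 = bits 1101 0000: reading 4 bits as sbits(3) yields the
     * two's-complement value of 1101, i.e. -3. */
    static const uint8_t data[] = { 0xD0 };
    toy_bits c = { data, 0 };
    const int v = toy_get_sbits(&c, 3);
    assert(v == -3);
    printf("sbits(3) = %d\n", v);
    return 0;
}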
......@@ -137,8 +137,8 @@ static void init_mode_node(EdgeBranch *const nwc,
}
}
void init_mode_tree(EdgeNode *const root_node, EdgeTip *const nt,
const int allow_sb128)
void dav1d_init_mode_tree(EdgeNode *const root_node, EdgeTip *const nt,
const int allow_sb128)
{
EdgeBranch *const root = (EdgeBranch *) root_node;
struct ModeSelMem mem;
......
......@@ -51,7 +51,7 @@ typedef struct EdgeBranch {
EdgeNode *split[4];
} EdgeBranch;
void init_mode_tree(EdgeNode *const root, EdgeTip *const nt,
const int allow_sb128);
void dav1d_init_mode_tree(EdgeNode *const root, EdgeTip *const nt,
const int allow_sb128);
#endif /* __DAV1D_SRC_INTRA_EDGE_H__ */
......@@ -74,16 +74,16 @@ static const struct {
};
enum IntraPredMode
bytefn(prepare_intra_edges)(const int x, const int have_left,
const int y, const int have_top,
const int w, const int h,
const enum EdgeFlags edge_flags,
const pixel *const dst,
const ptrdiff_t stride,
const pixel *prefilter_toplevel_sb_edge,
enum IntraPredMode mode, int *const angle,
const int tw, const int th,
pixel *const topleft_out)
bytefn(dav1d_prepare_intra_edges)(const int x, const int have_left,
const int y, const int have_top,
const int w, const int h,
const enum EdgeFlags edge_flags,
const pixel *const dst,
const ptrdiff_t stride,
const pixel *prefilter_toplevel_sb_edge,
enum IntraPredMode mode, int *const angle,
const int tw, const int th,
pixel *const topleft_out)
{
assert(y < h && x < w);
......
......@@ -76,12 +76,12 @@
* they will be extended from nearby edges as defined by the av1 spec.
*/
enum IntraPredMode
bytefn(prepare_intra_edges)(int x, int have_left, int y, int have_top,
int w, int h, enum EdgeFlags edge_flags,
const pixel *dst, ptrdiff_t stride,
const pixel *prefilter_toplevel_sb_edge,
enum IntraPredMode mode, int *angle,
int tw, int th, pixel *topleft_out);
bytefn(dav1d_prepare_intra_edges)(int x, int have_left, int y, int have_top,
int w, int h, enum EdgeFlags edge_flags,
const pixel *dst, ptrdiff_t stride,
const pixel *prefilter_toplevel_sb_edge,
enum IntraPredMode mode, int *angle,
int tw, int th, pixel *topleft_out);
// is or'ed with the angle argument into intra predictors to signal that edges
// are smooth and should use reduced filter strength
......
......@@ -42,7 +42,7 @@ static void decomp_tx(uint8_t (*txa)[2 /* txsz, step */][32 /* y */][32 /* x */]
const int y_off, const int x_off,
const uint16_t *const tx_masks)
{
const TxfmInfo *const t_dim = &av1_txfm_dimensions[from];
const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[from];
int is_split;
if (depth > 1) {
......@@ -87,7 +87,7 @@ static inline void mask_edges_inter(uint32_t (*masks)[32][3],
const uint16_t *const tx_masks,
uint8_t *const a, uint8_t *const l)
{
const TxfmInfo *const t_dim = &av1_txfm_dimensions[max_tx];
const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[max_tx];
int y, x;
uint8_t txa[2 /* edge */][2 /* txsz, step */][32 /* y */][32 /* x */];
......@@ -151,7 +151,7 @@ static inline void mask_edges_intra(uint32_t (*const masks)[32][3],
const enum RectTxfmSize tx,
uint8_t *const a, uint8_t *const l)
{
const TxfmInfo *const t_dim = &av1_txfm_dimensions[tx];
const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[tx];
const int twl4 = t_dim->lw, thl4 = t_dim->lh;
const int twl4c = imin(2, twl4), thl4c = imin(2, thl4);
int y, x;
......@@ -194,7 +194,7 @@ static inline void mask_edges_chroma(uint32_t (*const masks)[32][2],
const enum RectTxfmSize tx,
uint8_t *const a, uint8_t *const l)
{
const TxfmInfo *const t_dim = &av1_txfm_dimensions[tx];
const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[tx];
const int twl4 = t_dim->lw, thl4 = t_dim->lh;
const int twl4c = !!twl4, thl4c = !!thl4;
int y, x;
......@@ -249,7 +249,7 @@ void dav1d_create_lf_mask_intra(Av1Filter *const lflvl,
if (!hdr->loopfilter.level_y[0] && !hdr->loopfilter.level_y[1])
return;
const uint8_t *const b_dim = av1_block_dimensions[bs];
const uint8_t *const b_dim = dav1d_block_dimensions[bs];
const int bw4 = imin(iw - bx, b_dim[0]);
const int bh4 = imin(ih - by, b_dim[1]);
const int bx4 = bx & 31;
......@@ -297,7 +297,7 @@ void dav1d_create_lf_mask_inter(Av1Filter *const lflvl,
if (!hdr->loopfilter.level_y[0] && !hdr->loopfilter.level_y[1])
return;
const uint8_t *const b_dim = av1_block_dimensions[bs];
const uint8_t *const b_dim = dav1d_block_dimensions[bs];
const int bw4 = imin(iw - bx, b_dim[0]);
const int bh4 = imin(ih - by, b_dim[1]);
const int bx4 = bx & 31;
......@@ -315,7 +315,7 @@ void dav1d_create_lf_mask_inter(Av1Filter *const lflvl,
}
mask_edges_inter(lflvl->filter_y, by4, bx4, bw4, bh4, skip,
av1_max_txfm_size_for_bs[bs][0], tx_masks, ay, ly);
dav1d_max_txfm_size_for_bs[bs][0], tx_masks, ay, ly);
if (!auv) return;
......
......@@ -45,9 +45,9 @@
#include "src/wedge.h"
static void init_internal(void) {
av1_init_wedge_masks();
av1_init_interintra_masks();
av1_init_qm_tables();
dav1d_init_wedge_masks();
dav1d_init_interintra_masks();
dav1d_init_qm_tables();
}
const char *dav1d_version(void) {
......@@ -129,9 +129,9 @@ int dav1d_open(Dav1dContext **const c_out,
// intra edge tree
c->intra_edge.root[BL_128X128] = &c->intra_edge.branch_sb128[0].node;
init_mode_tree(c->intra_edge.root[BL_128X128], c->intra_edge.tip_sb128, 1);
dav1d_init_mode_tree(c->intra_edge.root[BL_128X128], c->intra_edge.tip_sb128, 1);
c->intra_edge.root[BL_64X64] = &c->intra_edge.branch_sb64[0].node;
init_mode_tree(c->intra_edge.root[BL_64X64], c->intra_edge.tip_sb64, 0);
dav1d_init_mode_tree(c->intra_edge.root[BL_64X64], c->intra_edge.tip_sb64, 0);
return 0;
......@@ -190,7 +190,7 @@ int dav1d_decode(Dav1dContext *const c,
}
while (in->sz > 0) {
if ((res = parse_obus(c, in)) < 0)
if ((res = dav1d_parse_obus(c, in)) < 0)
return res;
assert(res <= in->sz);
......@@ -301,7 +301,7 @@ void dav1d_close(Dav1dContext **const c_out) {
dav1d_data_unref(&c->tile[n].data);
for (int n = 0; n < 8; n++) {
if (c->cdf[n].cdf)
cdf_thread_unref(&c->cdf[n]);
dav1d_cdf_thread_unref(&c->cdf[n]);
if (c->refs[n].p.p.data[0])
dav1d_thread_picture_unref(&c->refs[n].p);
if (c->refs[n].refmvs)
......
......@@ -424,9 +424,9 @@ static void selfguided_filter(int32_t *dst, const pixel *src,
const uint32_t p = (a * n >= b * b) * (a * n - b * b);
const uint32_t z = (p * s + (1 << 19)) >> 20;
const int x = sgr_x_by_xplus1[imin(z, 255)];
const int x = dav1d_sgr_x_by_xplus1[imin(z, 255)];
// This is where we invert A and B, so that B is of size coef.
AA[i] = (((1 << 8) - x) * BB[i] * sgr_one_by_x[n - 1] + (1 << 11)) >> 12;
AA[i] = (((1 << 8) - x) * BB[i] * dav1d_sgr_one_by_x[n - 1] + (1 << 11)) >> 12;
BB[i] = x;
}
AA += step * REST_UNIT_STRIDE;
......@@ -504,8 +504,8 @@ static void selfguided_c(pixel *p, const ptrdiff_t p_stride,
int32_t dst[64 * 384];
// both r1 and r0 can't be zero
if (!sgr_params[sgr_idx][0]) {
const int s1 = sgr_params[sgr_idx][3];
if (!dav1d_sgr_params[sgr_idx][0]) {
const int s1 = dav1d_sgr_params[sgr_idx][3];
selfguided_filter(dst, tmp, REST_UNIT_STRIDE, w, h, 9, s1);
const int w1 = (1 << 7) - sgr_w[1];
for (int j = 0; j < h; j++) {
......@@ -516,8 +516,8 @@ static void selfguided_c(pixel *p, const ptrdiff_t p_stride,
}
p += PXSTRIDE(p_stride);
}
} else if (!sgr_params[sgr_idx][1]) {
const int s0 = sgr_params[sgr_idx][2];
} else if (!dav1d_sgr_params[sgr_idx][1]) {
const int s0 = dav1d_sgr_params[sgr_idx][2];
selfguided_filter(dst, tmp, REST_UNIT_STRIDE, w, h, 25, s0);
const int w0 = sgr_w[0];
for (int j = 0; j < h; j++) {
......@@ -530,8 +530,8 @@ static void selfguided_c(pixel *p, const ptrdiff_t p_stride,
}
} else {
int32_t dst1[64 * 384];
const int s0 = sgr_params[sgr_idx][2];
const int s1 = sgr_params[sgr_idx][3];
const int s0 = dav1d_sgr_params[sgr_idx][2];
const int s1 = dav1d_sgr_params[sgr_idx][3];
const int w0 = sgr_w[0];
const int w1 = (1 << 7) - w0 - sgr_w[1];
selfguided_filter(dst, tmp, REST_UNIT_STRIDE, w, h, 25, s0);
......
......@@ -31,6 +31,6 @@
#include "dav1d/data.h"
#include "src/internal.h"
int parse_obus(Dav1dContext *c, Dav1dData *in);
int dav1d_parse_obus(Dav1dContext *c, Dav1dData *in);
#endif /* __DAV1D_SRC_OBU_H__ */
......@@ -3063,7 +3063,7 @@ static const uint8_t qm_tbl_32x32_t[][2][528] = {
},
};
const uint8_t *av1_qm_tbl[16][2][N_RECT_TX_SIZES];
const uint8_t *dav1d_qm_tbl[16][2][N_RECT_TX_SIZES];
static uint8_t pb_32x32[32 * 32];
static uint8_t qm_tbl_4x4[15][2][16];
static uint8_t qm_tbl_4x8[15][2][32];
......@@ -3104,47 +3104,47 @@ static void untriangle(uint8_t *dst, const uint8_t *src, const int sz) {
}
}
void av1_init_qm_tables(void) {
void dav1d_init_qm_tables(void) {
// This function is guaranteed to be called only once
for (int i = 0; i < 15; i++)
for (int j = 0; j < 2; j++) {
// note that the w/h in the assignment is inverted, this is on purpose
// because we store coefficients transposed
av1_qm_tbl[i][j][RTX_4X8 ] = qm_tbl_8x4[i][j];
av1_qm_tbl[i][j][RTX_8X4 ] = qm_tbl_4x8[i][j];
dav1d_qm_tbl[i][j][RTX_4X8 ] = qm_tbl_8x4[i][j];
dav1d_qm_tbl[i][j][RTX_8X4 ] = qm_tbl_4x8[i][j];
transpose(qm_tbl_4x8[i][j], qm_tbl_8x4[i][j], 8, 4);
av1_qm_tbl[i][j][RTX_4X16 ] = qm_tbl_16x4[i][j];
av1_qm_tbl[i][j][RTX_16X4 ] = qm_tbl_4x16[i][j];
dav1d_qm_tbl[i][j][RTX_4X16 ] = qm_tbl_16x4[i][j];
dav1d_qm_tbl[i][j][RTX_16X4 ] = qm_tbl_4x16[i][j];
transpose(qm_tbl_4x16[i][j], qm_tbl_16x4[i][j], 16, 4);
av1_qm_tbl[i][j][RTX_8X16 ] = qm_tbl_16x8[i][j];
av1_qm_tbl[i][j][RTX_16X8 ] = qm_tbl_8x16[i][j];
dav1d_qm_tbl[i][j][RTX_8X16 ] = qm_tbl_16x8[i][j];
dav1d_qm_tbl[i][j][RTX_16X8 ] = qm_tbl_8x16[i][j];
transpose(qm_tbl_8x16[i][j], qm_tbl_16x8[i][j], 16, 8);
av1_qm_tbl[i][j][RTX_8X32 ] = qm_tbl_32x8[i][j];
av1_qm_tbl[i][j][RTX_32X8 ] = qm_tbl_8x32[i][j];
dav1d_qm_tbl[i][j][RTX_8X32 ] = qm_tbl_32x8[i][j];
dav1d_qm_tbl[i][j][RTX_32X8 ] = qm_tbl_8x32[i][j];
transpose(qm_tbl_8x32[i][j], qm_tbl_32x8[i][j], 32, 8);
av1_qm_tbl[i][j][RTX_16X32] = qm_tbl_32x16[i][j];
av1_qm_tbl[i][j][RTX_32X16] = qm_tbl_16x32[i][j];
dav1d_qm_tbl[i][j][RTX_16X32] = qm_tbl_32x16[i][j];
dav1d_qm_tbl[i][j][RTX_32X16] = qm_tbl_16x32[i][j];
transpose(qm_tbl_16x32[i][j], qm_tbl_32x16[i][j], 32, 16);
av1_qm_tbl[i][j][ TX_4X4 ] = qm_tbl_4x4[i][j];
av1_qm_tbl[i][j][ TX_8X8 ] = qm_tbl_8x8[i][j];
av1_qm_tbl[i][j][ TX_16X16] = qm_tbl_16x16[i][j];
av1_qm_tbl[i][j][ TX_32X32] = qm_tbl_32x32[i][j];
dav1d_qm_tbl[i][j][ TX_4X4 ] = qm_tbl_4x4[i][j];
dav1d_qm_tbl[i][j][ TX_8X8 ] = qm_tbl_8x8[i][j];
dav1d_qm_tbl[i][j][ TX_16X16] = qm_tbl_16x16[i][j];
dav1d_qm_tbl[i][j][ TX_32X32] = qm_tbl_32x32[i][j];
untriangle(qm_tbl_4x4[i][j], qm_tbl_4x4_t[i][j], 4);
untriangle(qm_tbl_8x8[i][j], qm_tbl_8x8_t[i][j], 8);
untriangle(qm_tbl_32x32[i][j], qm_tbl_32x32_t[i][j], 32);
subsample(qm_tbl_16x16[i][j], qm_tbl_32x32[i][j], 16, 2);
av1_qm_tbl[i][j][ TX_64X64] = av1_qm_tbl[i][j][ TX_32X32];
av1_qm_tbl[i][j][RTX_64X32] = av1_qm_tbl[i][j][ TX_32X32];
av1_qm_tbl[i][j][RTX_64X16] = av1_qm_tbl[i][j][RTX_32X16];
av1_qm_tbl[i][j][RTX_32X64] = av1_qm_tbl[i][j][ TX_32X32];
av1_qm_tbl[i][j][RTX_16X64] = av1_qm_tbl[i][j][RTX_16X32];
dav1d_qm_tbl[i][j][ TX_64X64] = dav1d_qm_tbl[i][j][ TX_32X32];
dav1d_qm_tbl[i][j][RTX_64X32] = dav1d_qm_tbl[i][j][ TX_32X32];
dav1d_qm_tbl[i][j][RTX_64X16] = dav1d_qm_tbl[i][j][RTX_32X16];
dav1d_qm_tbl[i][j][RTX_32X64] = dav1d_qm_tbl[i][j][ TX_32X32];
dav1d_qm_tbl[i][j][RTX_16X64] = dav1d_qm_tbl[i][j][RTX_16X32];
}
memset(pb_32x32, 32, sizeof(pb_32x32));
for (int j = 0; j < 2; j++)
for (int k = 0; k < N_RECT_TX_SIZES; k++)
av1_qm_tbl[15][j][k] = pb_32x32;
dav1d_qm_tbl[15][j][k] = pb_32x32;
}
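
The renamed init path still builds the square tables from triangularly packed sources: qm_tbl_32x32_t holds 528 = 32*33/2 bytes per plane, i.e. one triangle of a symmetric 32x32 matrix, which untriangle() expands before subsample() derives the 16x16 table from it. A stand-alone sketch of that unpacking follows; the exact row ordering dav1d uses is assumed, not taken from the source.

#include <assert.h>
#include <stdint.h>

/* Sketch of a triangular unpack like untriangle(): src holds sz*(sz+1)/2
 * entries (528 for sz = 32), one triangle of a symmetric sz x sz matrix,
 * and the full matrix is rebuilt by mirroring across the diagonal.
 * Row ordering here is assumed for illustration. */
static void untriangle_sketch(uint8_t *dst, const uint8_t *src, int sz) {
    for (int y = 0; y < sz; y++)
        for (int x = 0; x <= y; x++) {
            const uint8_t v = *src++;
            dst[y * sz + x] = v;   /* stored triangle */
            dst[x * sz + y] = v;   /* mirrored counterpart */
        }
}

int main(void) {
    /* 3x3 toy case: 6 packed entries -> symmetric 3x3 matrix. */
    static const uint8_t packed[6] = { 1, 2, 3, 4, 5, 6 };
    uint8_t full[9];
    untriangle_sketch(full, packed, 3);
    assert(full[0 * 3 + 1] == full[1 * 3 + 0]);   /* symmetry holds */
    assert(full[2 * 3 + 2] == 6);
    return 0;
}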
......@@ -30,8 +30,8 @@
#include "src/levels.h"
extern const uint8_t *av1_qm_tbl[16][2][N_RECT_TX_SIZES];
extern const uint8_t *dav1d_qm_tbl[16][2][N_RECT_TX_SIZES];
void av1_init_qm_tables(void);
void dav1d_init_qm_tables(void);
#endif /* __DAV1D_SRC_QM_H__ */
......@@ -57,19 +57,19 @@ typedef decl_backup_ipred_edge_fn(*backup_ipred_edge_fn);
void (name)(Dav1dTileContext *t, enum BlockSize bs, const Av1Block *b)
typedef decl_read_coef_blocks_fn(*read_coef_blocks_fn);
decl_recon_b_intra_fn(recon_b_intra_8bpc);
decl_recon_b_intra_fn(recon_b_intra_16bpc);
decl_recon_b_intra_fn(dav1d_recon_b_intra_8bpc);
decl_recon_b_intra_fn(dav1d_recon_b_intra_16bpc);
decl_recon_b_inter_fn(recon_b_inter_8bpc);
decl_recon_b_inter_fn(recon_b_inter_16bpc);
decl_recon_b_inter_fn(dav1d_recon_b_inter_8bpc);
decl_recon_b_inter_fn(dav1d_recon_b_inter_16bpc);
decl_filter_sbrow_fn(filter_sbrow_8bpc);
decl_filter_sbrow_fn(filter_sbrow_16bpc);
decl_filter_sbrow_fn(dav1d_filter_sbrow_8bpc);
decl_filter_sbrow_fn(dav1d_filter_sbrow_16bpc);
decl_backup_ipred_edge_fn(backup_ipred_edge_8bpc);
decl_backup_ipred_edge_fn(backup_ipred_edge_16bpc);
decl_backup_ipred_edge_fn(dav1d_backup_ipred_edge_8bpc);
decl_backup_ipred_edge_fn(dav1d_backup_ipred_edge_16bpc);
decl_read_coef_blocks_fn(read_coef_blocks_8bpc);
decl_read_coef_blocks_fn(read_coef_blocks_16bpc);
decl_read_coef_blocks_fn(dav1d_read_coef_blocks_8bpc);
decl_read_coef_blocks_fn(dav1d_read_coef_blocks_16bpc);
#endif /* __DAV1D_SRC_RECON_H__ */
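
The _8bpc/_16bpc pairs above come from dav1d's bit-depth templating: the same reconstruction source is compiled once per bit depth, and a token-pasting macro appends the matching suffix to each templated symbol. A minimal sketch of that pattern follows; the macro and type names (suffixed, pixel_sketch, fill_plane) are illustrative, not the ones in dav1d's bitdepth header.

#include <stdint.h>
#include <stdio.h>

#ifndef BITDEPTH
#define BITDEPTH 8            /* build the same file with -DBITDEPTH=16 for the other variant */
#endif

#if BITDEPTH == 8
typedef uint8_t  pixel_sketch;
#define suffixed(name) name##_8bpc
#else
typedef uint16_t pixel_sketch;
#define suffixed(name) name##_16bpc
#endif

/* Expands to fill_plane_8bpc() or fill_plane_16bpc() depending on BITDEPTH. */
static void suffixed(fill_plane)(pixel_sketch *dst, int n, pixel_sketch v) {
    for (int i = 0; i < n; i++)
        dst[i] = v;
}

int main(void) {
    pixel_sketch buf[4];
    suffixed(fill_plane)(buf, 4, 3);
    printf("%s: buf[0] = %d\n", BITDEPTH == 8 ? "8bpc" : "16bpc", buf[0]);
    return 0;
}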
......@@ -3235,7 +3235,7 @@ enum BlockSize {
BS_4x4,
N_BS_SIZES,
};
extern const uint8_t av1_block_dimensions[N_BS_SIZES][4];
extern const uint8_t dav1d_block_dimensions[N_BS_SIZES][4];
const uint8_t bs_to_sbtype[N_BS_SIZES] = {
[BS_128x128] = BLOCK_128X128,
[BS_128x64] = BLOCK_128X64,
......@@ -3308,8 +3308,8 @@ void av1_find_ref_mvs(CANDIDATE_MV *mvstack, int *cnt, int_mv (*mvlist)[2],
int tile_row_start4, int tile_row_end4,
AV1_COMMON *cm)
{
const int bw4 = av1_block_dimensions[bs][0];
const int bh4 = av1_block_dimensions[bs][1];
const int bw4 = dav1d_block_dimensions[bs][0];
const int bh4 = dav1d_block_dimensions[bs][1];
int stride = cm->cur_frame.mv_stride;
MACROBLOCKD xd = (MACROBLOCKD) {
.n8_w = bw4,
......
......@@ -71,8 +71,8 @@ static inline void splat_oneref_mv(refmvs *r, const ptrdiff_t stride,
const int ref, const mv mv,
const int is_interintra)
{
const int bw4 = av1_block_dimensions[bs][0];
int bh4 = av1_block_dimensions[bs][1];
const int bw4 = dav1d_block_dimensions[bs][0];
int bh4 = dav1d_block_dimensions[bs][1];
r += by4 * stride + bx4;
const refmvs tmpl = (refmvs) {
......@@ -92,8 +92,8 @@ static inline void splat_intrabc_mv(refmvs *r, const ptrdiff_t stride,
const int by4, const int bx4,
const enum BlockSize bs, const mv mv)
{
const int bw4 = av1_block_dimensions[bs][0];
int bh4 = av1_block_dimensions[bs][1];
const int bw4 = dav1d_block_dimensions[bs][0];
int bh4 = dav1d_block_dimensions[bs][1];
r += by4 * stride + bx4;
const refmvs tmpl = (refmvs) {
......@@ -116,8 +116,8 @@ static inline void splat_tworef_mv(refmvs *r, const ptrdiff_t stride,
const int ref1, const int ref2,
const mv mv1, const mv mv2)
{
const int bw4 = av1_block_dimensions[bs][0];
int bh4 = av1_block_dimensions[bs][1];
const int bw4 = dav1d_block_dimensions[bs][0];
int bh4 = dav1d_block_dimensions[bs][1];
r += by4 * stride + bx4;
const refmvs tmpl = (refmvs) {
......@@ -138,8 +138,8 @@ static inline void splat_intraref(refmvs *r, const ptrdiff_t stride,
const enum BlockSize bs,
const enum IntraPredMode mode)
{
const int bw4 = av1_block_dimensions[bs][0];
int bh4 = av1_block_dimensions[bs][1];
const int bw4 = dav1d_block_dimensions[bs][0];
int bh4 = dav1d_block_dimensions[bs][1];
r += by4 * stride + bx4;
do {
......
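
Each splat_* helper above follows the same pattern: look up the block's width and height in 4-pixel units from dav1d_block_dimensions, offset into the strided per-4x4 refmvs grid, and write one template entry into every covered cell. A stand-alone sketch of that pattern follows; the toy_refmvs struct and the fill loop are illustrative stand-ins, since the actual loop bodies are elided above.

#include <assert.h>
#include <stddef.h>
#include <string.h>

typedef struct { int ref, mv_x, mv_y; } toy_refmvs;

/* Splat one template entry over a bw4 x bh4 window of a strided grid. */
static void splat_sketch(toy_refmvs *r, ptrdiff_t stride,
                         int by4, int bx4, int bw4, int bh4,
                         toy_refmvs tmpl)
{
    r += by4 * stride + bx4;          /* top-left 4x4 cell of the block */
    for (int y = 0; y < bh4; y++) {
        for (int x = 0; x < bw4; x++)
            r[x] = tmpl;              /* same template for every cell */
        r += stride;
    }
}

int main(void) {
    toy_refmvs grid[8 * 8];
    memset(grid, 0, sizeof(grid));
    const toy_refmvs tmpl = { 1, -4, 2 };
    /* a 16x8 block covers bw4 = 4 by bh4 = 2 cells, placed at (by4, bx4) = (2, 1) */
    splat_sketch(grid, 8, 2, 1, 4, 2, tmpl);
    assert(grid[2 * 8 + 1].ref == 1 && grid[3 * 8 + 4].mv_x == -4);
    assert(grid[2 * 8 + 0].ref == 0);   /* cells outside the block untouched */
    return 0;
}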
......@@ -467,7 +467,7 @@ static const int16_t ALIGN(av1_default_scan_32x32[], 32) = {
892, 861, 830, 799, 831, 862, 893, 924, 955, 986, 1017, 1018, 987, 956, 925, 894, 863, 895, 926, 957, 988, 1019, 1020, 989, 958, 927, 959, 990, 1021, 1022, 991, 1023,
};
const int16_t *const av1_scans[N_RECT_TX_SIZES][3] = {
const int16_t *const dav1d_scans[N_RECT_TX_SIZES][3] = {
[TX_4X4] = {
[TX_CLASS_2D] = av1_default_scan_4x4,
[TX_CLASS_V] = av1_mrow_scan_4x4,
......
......@@ -32,6 +32,6 @@
#include "src/levels.h"
extern const int16_t *const av1_scans[N_RECT_TX_SIZES][3];
extern const int16_t *const dav1d_scans[N_RECT_TX_SIZES][3];