/*****************************************************************************
 * macroblock.c: macroblock encoding
 *****************************************************************************
 * Copyright (C) 2003-2011 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"

/* These chroma DC functions don't have assembly versions and are only used here. */

#define ZIG(i,y,x) level[i] = dct[x*2+y];
static inline void zigzag_scan_2x2_dc( dctcoef level[4], dctcoef dct[4] )
{
    ZIG(0,0,0)
    ZIG(1,0,1)
    ZIG(2,1,0)
    ZIG(3,1,1)
}
#undef ZIG

#define IDCT_DEQUANT_START \
    int d0 = dct[0] + dct[1]; \
    int d1 = dct[2] + dct[3]; \
    int d2 = dct[0] - dct[1]; \
    int d3 = dct[2] - dct[3]; \
    int dmf = dequant_mf[i_qp%6][0] << i_qp/6;

static inline void idct_dequant_2x2_dc( dctcoef dct[4], dctcoef dct4x4[4][16], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    dct4x4[0][0] = (d0 + d1) * dmf >> 5;
    dct4x4[1][0] = (d0 - d1) * dmf >> 5;
    dct4x4[2][0] = (d2 + d3) * dmf >> 5;
    dct4x4[3][0] = (d2 - d3) * dmf >> 5;
}

static inline void idct_dequant_2x2_dconly( dctcoef out[4], dctcoef dct[4], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    out[0] = (d0 + d1) * dmf >> 5;
    out[1] = (d0 - d1) * dmf >> 5;
    out[2] = (d2 + d3) * dmf >> 5;
    out[3] = (d2 - d3) * dmf >> 5;
}

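/* The chroma DC coefficients are coded as a separate 2x2 block: dct2x2dc gathers
 * the DC term of each of the four 4x4 chroma blocks with a 2x2 Hadamard transform
 * and zeroes it in the source blocks, so the AC blocks can then be quantized on
 * their own. */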
static inline void dct2x2dc( dctcoef d[4], dctcoef dct4x4[4][16] )
{
    int d0 = dct4x4[0][0] + dct4x4[1][0];
    int d1 = dct4x4[2][0] + dct4x4[3][0];
    int d2 = dct4x4[0][0] - dct4x4[1][0];
    int d3 = dct4x4[2][0] - dct4x4[3][0];
    d[0] = d0 + d1;
    d[2] = d2 + d3;
    d[1] = d0 - d1;
    d[3] = d2 - d3;
    dct4x4[0][0] = 0;
    dct4x4[1][0] = 0;
    dct4x4[2][0] = 0;
    dct4x4[3][0] = 0;
}

static ALWAYS_INLINE int x264_quant_4x4( x264_t *h, dctcoef dct[16], int i_qp, int ctx_block_cat, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
    if( h->mb.b_noise_reduction && ctx_block_cat != DCT_LUMA_AC )
        h->quantf.denoise_dct( dct, h->nr_residual_sum[0], h->nr_offset[0], 16 );
    if( h->mb.b_trellis )
        return x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, ctx_block_cat, b_intra, 0, idx );
    else
        return h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
}

static ALWAYS_INLINE int x264_quant_8x8( x264_t *h, dctcoef dct[64], int i_qp, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
    if( h->mb.b_noise_reduction )
        h->quantf.denoise_dct( dct, h->nr_residual_sum[1], h->nr_offset[1], 64 );
    if( h->mb.b_trellis )
        return x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
    else
        return h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
}

/* All encoding functions must output the correct CBP and NNZ values.
 * The entropy coding functions will check CBP first, then NNZ, before
 * actually reading the DCT coefficients.  NNZ still must be correct even
 * if CBP is zero because of the use of NNZ values for context selection.
 * "NNZ" need only be 0 or 1 rather than the exact coefficient count because
 * that is only needed in CAVLC, and will be calculated by CAVLC's residual
 * coding and stored as necessary. */

/* This means that decimation can be done merely by adjusting the CBP and NNZ
 * rather than memsetting the coefficients. */
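/* For example, an inter 8x8 region that quantizes to a single small trailing
 * coefficient can be decimated simply by leaving its CBP bit clear and storing
 * 0 in its NNZ cache entries; the stale coefficients in the DCT buffer are
 * never read by the entropy coder, so they don't need to be zeroed. */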

void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp )
{
    int nz;
    pixel *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]];
    pixel *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]];
    ALIGNED_ARRAY_16( dctcoef, dct4x4,[16] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst );
        h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
        h->mb.i_cbp_luma |= nz<<(idx>>2);
        return;
    }

    h->dctf.sub4x4_dct( dct4x4, p_src, p_dst );

    nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx );
    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<(idx>>2);
        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 );
        h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp );
        h->dctf.add4x4_idct( p_dst, dct4x4 );
    }
}

#define STORE_8x8_NNZ( s8, nz )\
do\
{\
    M16( &h->mb.cache.non_zero_count[(s8) + 0*8] ) = (nz) * 0x0101;\
    M16( &h->mb.cache.non_zero_count[(s8) + 1*8] ) = (nz) * 0x0101;\
} while(0)

#define CLEAR_16x16_NNZ \
{\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = 0;\
}

void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp )
{
    int x = idx&1;
    int y = idx>>1;
    int s8 = X264_SCAN8_0 + 2*x + 16*y;
    int nz;
    pixel *p_src = &h->mb.pic.p_fenc[0][8*x + 8*y*FENC_STRIDE];
    pixel *p_dst = &h->mb.pic.p_fdec[0][8*x + 8*y*FDEC_STRIDE];
    ALIGNED_ARRAY_16( dctcoef, dct8x8,[64] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst );
        STORE_8x8_NNZ( s8, nz );
        h->mb.i_cbp_luma |= nz<<idx;
        return;
    }

    h->dctf.sub8x8_dct8( dct8x8, p_src, p_dst );

    nz = x264_quant_8x8( h, dct8x8, i_qp, 1, idx );
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<idx;
        h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 );
        h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp );
        h->dctf.add8x8_idct8( p_dst, dct8x8 );
        STORE_8x8_NNZ( s8, 1 );
    }
    else
        STORE_8x8_NNZ( s8, 0 );
}

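/* For I_16x16 the DC coefficient of each 4x4 luma block is extracted, run through
 * an extra 4x4 Hadamard transform and coded as a separate DC block; each 4x4 block
 * then carries only its 15 AC coefficients. */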
static void x264_mb_encode_i16x16( x264_t *h, int i_qp )
{
    pixel *p_src = h->mb.pic.p_fenc[0];
    pixel *p_dst = h->mb.pic.p_fdec[0];

    ALIGNED_ARRAY_16( dctcoef, dct4x4,[16],[16] );
    ALIGNED_ARRAY_16( dctcoef, dct_dc4x4,[16] );

    int nz;
    int decimate_score = h->mb.b_dct_decimate ? 0 : 9;

    if( h->mb.b_lossless )
    {
        for( int i = 0; i < 16; i++ )
        {
            int oe = block_idx_xy_fenc[i];
            int od = block_idx_xy_fdec[i];
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[i], p_src+oe, p_dst+od, &dct_dc4x4[block_idx_yx_1d[i]] );
            h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
            h->mb.i_cbp_luma |= nz;
        }
        h->mb.i_cbp_luma *= 0xf;
        h->mb.cache.non_zero_count[x264_scan8[24]] = array_non_zero( dct_dc4x4 );
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );
        return;
    }

    h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );

    for( int i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        if( h->mb.b_noise_reduction )
            h->quantf.denoise_dct( dct4x4[i], h->nr_residual_sum[0], h->nr_offset[0], 16 );
        dct_dc4x4[block_idx_xy_1d[i]] = dct4x4[i][0];
        dct4x4[i][0] = 0;

        /* quant/scan/dequant */
        nz = x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i );
        h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] );
            h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp );
            if( decimate_score < 6 ) decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[i] );
            h->mb.i_cbp_luma = 0xf;
        }
    }

    /* Writing the 16 CBFs in an i16x16 block is quite costly, so decimation can save many bits. */
    /* More useful with CAVLC, but still useful with CABAC. */
    if( decimate_score < 6 )
    {
        h->mb.i_cbp_luma = 0;
        CLEAR_16x16_NNZ
    }

    h->dctf.dct4x4dc( dct_dc4x4 );
    if( h->mb.b_trellis )
        nz = x264_quant_dc_trellis( h, dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1, 0 );
    else
        nz = h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 );

    h->mb.cache.non_zero_count[x264_scan8[24]] = nz;
    if( nz )
    {
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );

        /* output samples to fdec */
        h->dctf.idct4x4dc( dct_dc4x4 );
        h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp );  /* XXX not inversed */
        if( h->mb.i_cbp_luma )
            for( int i = 0; i < 16; i++ )
                dct4x4[i][0] = dct_dc4x4[block_idx_xy_1d[i]];
    }

    /* put pixels to fdec */
    if( h->mb.i_cbp_luma )
        h->dctf.add16x16_idct( p_dst, dct4x4 );
    else if( nz )
        h->dctf.add16x16_idct_dc( p_dst, dct_dc4x4 );
}

/* Round down coefficients losslessly in DC-only chroma blocks.
 * Unlike luma blocks, this can't be done with a lookup table or
 * other shortcut technique because of the interdependencies
 * between the coefficients due to the chroma DC transform. */
static ALWAYS_INLINE int x264_mb_optimize_chroma_dc( x264_t *h, dctcoef dct2x2[4], int dequant_mf[6][16], int i_qp )
{
    int dmf = dequant_mf[i_qp%6][0] << i_qp/6;

    /* If the QP is too high, there's no benefit to rounding optimization. */
    if( dmf > 32*64 )
        return 1;

    return h->quantf.optimize_chroma_dc( dct2x2, dmf );
}

void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp )
{
    int nz, nz_dc;
    int b_decimate = b_inter && h->mb.b_dct_decimate;
    ALIGNED_ARRAY_16( dctcoef, dct2x2,[4] );
    h->mb.i_cbp_chroma = 0;
    h->nr_count[2] += h->mb.b_noise_reduction * 4;

    /* Early termination: check variance of chroma residual before encoding.
     * Don't bother trying early termination at low QPs.
     * Values are experimentally derived. */
    if( b_decimate && i_qp >= (h->mb.b_trellis ? 12 : 18) && !h->mb.b_noise_reduction )
    {
        int thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;
        int ssd[2];
        int score = h->pixf.var2_8x8( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE, &ssd[0] );
        if( score < thresh*4 )
            score += h->pixf.var2_8x8( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE, &ssd[1] );
        if( score < thresh*4 )
        {
            h->mb.cache.non_zero_count[x264_scan8[16]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[17]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[18]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[19]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[20]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[21]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[22]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[23]] = 0;
            M16( &h->mb.cache.non_zero_count[x264_scan8[25]] ) = 0;

            for( int ch = 0; ch < 2; ch++ )
            {
                if( ssd[ch] > thresh )
                {
                    h->dctf.sub8x8_dct_dc( dct2x2, h->mb.pic.p_fenc[1+ch], h->mb.pic.p_fdec[1+ch] );
                    if( h->mb.b_trellis )
                        nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
                    else
                        nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

                    if( nz_dc )
                    {
                        if( !x264_mb_optimize_chroma_dc( h, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp ) )
                            continue;
                        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 1;
                        zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                        idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                        h->dctf.add8x8_idct_dc( h->mb.pic.p_fdec[1+ch], dct2x2 );
                        h->mb.i_cbp_chroma = 1;
                    }
                }
            }
            return;
        }
    }

    for( int ch = 0; ch < 2; ch++ )
    {
        pixel *p_src = h->mb.pic.p_fenc[1+ch];
        pixel *p_dst = h->mb.pic.p_fdec[1+ch];
        int i_decimate_score = 0;
        int nz_ac = 0;

        ALIGNED_ARRAY_16( dctcoef, dct4x4,[4],[16] );

        if( h->mb.b_lossless )
        {
            for( int i = 0; i < 4; i++ )
            {
                int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE;
                int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE;
                nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od, &h->dct.chroma_dc[ch][i] );
                h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
                h->mb.i_cbp_chroma |= nz;
            }
            h->mb.cache.non_zero_count[x264_scan8[25]+ch] = array_non_zero( h->dct.chroma_dc[ch] );
            continue;
        }

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
        if( h->mb.b_noise_reduction )
            for( int i = 0; i < 4; i++ )
                h->quantf.denoise_dct( dct4x4[i], h->nr_residual_sum[2], h->nr_offset[2], 16 );
        dct2x2dc( dct2x2, dct4x4 );
        /* calculate dct coeffs */
        for( int i = 0; i < 4; i++ )
        {
            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] );
            h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
            if( nz )
            {
                nz_ac = 1;
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] );
                h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                if( b_decimate )
                    i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] );
            }
        }

        if( h->mb.b_trellis )
            nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
        else
            nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = nz_dc;

        if( (b_decimate && i_decimate_score < 7) || !nz_ac )
        {
            /* Decimate the block */
            h->mb.cache.non_zero_count[x264_scan8[16+0]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+1]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+2]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+3]+24*ch] = 0;
            if( !nz_dc ) /* Whole block is empty */
                continue;
            if( !x264_mb_optimize_chroma_dc( h, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp  ) )
            {
                h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 0;
                continue;
            }
            /* DC-only */
            zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
            idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            h->dctf.add8x8_idct_dc( p_dst, dct2x2 );
        }
        else
        {
            h->mb.i_cbp_chroma = 1;
            if( nz_dc )
            {
                zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                idct_dequant_2x2_dc( dct2x2, dct4x4, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            }
            h->dctf.add8x8_idct( p_dst, dct4x4 );
        }
    }

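    /* At this point i_cbp_chroma is 1 iff some AC coefficient survived, and the M16()
     * below tests the two chroma DC NNZ flags, so the sum maps to 2 = DC+AC,
     * 1 = DC only, 0 = none. */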
    /* 0 = none, 1 = DC only, 2 = DC+AC */
    h->mb.i_cbp_chroma = ((!!M16( &h->mb.cache.non_zero_count[x264_scan8[25]] )) | h->mb.i_cbp_chroma) + h->mb.i_cbp_chroma;
}

static void x264_macroblock_encode_skip( x264_t *h )
{
    M32( &h->mb.cache.non_zero_count[x264_scan8[0]+0*8] ) = 0;
    M32( &h->mb.cache.non_zero_count[x264_scan8[0]+1*8] ) = 0;
    M32( &h->mb.cache.non_zero_count[x264_scan8[0]+2*8] ) = 0;
    M32( &h->mb.cache.non_zero_count[x264_scan8[0]+3*8] ) = 0;
    for( int i = 16; i < 24; i++ )
        h->mb.cache.non_zero_count[x264_scan8[i]] = 0;
    h->mb.i_cbp_luma = 0;
    h->mb.i_cbp_chroma = 0;
    h->mb.cbp[h->mb.i_mb_xy] = 0;
}

/*****************************************************************************
 * x264_macroblock_encode_pskip:
 *  Encode an already marked skip block
 *****************************************************************************/
static void x264_macroblock_encode_pskip( x264_t *h )
{
    /* don't do pskip motion compensation if it was already done in macroblock_analyse */
    if( !h->mb.b_skip_mc )
    {
        int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                              h->mb.mv_min[0], h->mb.mv_max[0] );
        int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                              h->mb.mv_min[1], h->mb.mv_max[1] );

        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvx, mvy, 16, 16, &h->sh.weight[0][0] );

        /* Special case for mv0, which is (of course) very common in P-skip mode. */
        if( mvx | mvy )
            h->mc.mc_chroma( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                             mvx, mvy, 8, 8 );
        else
            h->mc.load_deinterleave_8x8x2_fdec( h->mb.pic.p_fdec[1], h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1] );

        if( h->sh.weight[0][1].weightfn )
            h->sh.weight[0][1].weightfn[8>>2]( h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               &h->sh.weight[0][1], 8 );
        if( h->sh.weight[0][2].weightfn )
            h->sh.weight[0][2].weightfn[8>>2]( h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               &h->sh.weight[0][2], 8 );
    }

    x264_macroblock_encode_skip( h );
}

/*****************************************************************************
 * Intra prediction for predictive lossless mode.
 *****************************************************************************/

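/* In lossless (transform bypass) mode the vertical/horizontal modes effectively
 * become sample-wise DPCM along the prediction direction, so these variants copy
 * each row/column from its immediate neighbour in the source plane instead of
 * calling the normal prediction functions. */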
void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode )
{
    if( i_mode == I_PRED_CHROMA_V )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1]-FENC_STRIDE, FENC_STRIDE, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2]-FENC_STRIDE, FENC_STRIDE, 8 );
        memcpy( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[1]-FDEC_STRIDE, 8*sizeof(pixel) );
        memcpy( h->mb.pic.p_fdec[2], h->mb.pic.p_fdec[2]-FDEC_STRIDE, 8*sizeof(pixel) );
    }
    else if( i_mode == I_PRED_CHROMA_H )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1]-1, FENC_STRIDE, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2]-1, FENC_STRIDE, 8 );
        x264_copy_column8( h->mb.pic.p_fdec[1]+4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+4*FDEC_STRIDE-1 );
        x264_copy_column8( h->mb.pic.p_fdec[2]+4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+4*FDEC_STRIDE-1 );
    }
    else
    {
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
    }
}

void x264_predict_lossless_4x4( x264_t *h, pixel *p_dst, int idx, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    pixel *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;

    if( i_mode == I_PRED_4x4_V )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
    else if( i_mode == I_PRED_4x4_H )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
    else
        h->predict_4x4[i_mode]( p_dst );
}

void x264_predict_lossless_8x8( x264_t *h, pixel *p_dst, int idx, int i_mode, pixel edge[33] )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    pixel *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;

    if( i_mode == I_PRED_8x8_V )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
    else if( i_mode == I_PRED_8x8_H )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
    else
        h->predict_8x8[i_mode]( p_dst, edge );
}

void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    if( i_mode == I_PRED_16x16_V )
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
    else if( i_mode == I_PRED_16x16_H )
        h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 );
    else
        h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );
}

/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
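/* Top-level residual coding for one macroblock: dispatches on the mb type (PCM,
 * P/B skip, I_16x16 / I_8x8 / I_4x4, inter), then encodes chroma, stores the CBP,
 * and finally demotes the mb to P_SKIP/B_SKIP when nothing was coded and the
 * motion info allows it. */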
void x264_macroblock_encode( x264_t *h )
{
    int i_qp = h->mb.i_qp;
    int b_decimate = h->mb.b_dct_decimate;
    int b_force_no_skip = 0;
    int nz;
    h->mb.i_cbp_luma = 0;
    h->mb.cache.non_zero_count[x264_scan8[24]] = 0;

    if( h->mb.i_type == I_PCM )
    {
        /* if PCM is chosen, we need to store reconstructed frame data */
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc[0], FENC_STRIDE, 16 );
        h->mc.copy[PIXEL_8x8]  ( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1], FENC_STRIDE, 8 );
        h->mc.copy[PIXEL_8x8]  ( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2], FENC_STRIDE, 8 );
        return;
    }

    if( !h->mb.b_allow_skip )
    {
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    if( h->mb.i_type == P_SKIP )
    {
        /* A bit special */
        x264_macroblock_encode_pskip( h );
        return;
    }
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h );
        return;
    }

    if( h->mb.i_type == I_16x16 )
    {
        const int i_mode = h->mb.i_intra16x16_pred_mode;
        h->mb.b_transform_8x8 = 0;

        if( h->mb.b_lossless )
            x264_predict_lossless_16x16( h, i_mode );
        else
            h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );

        /* encode the 16x16 macroblock */
        x264_mb_encode_i16x16( h, i_qp );
    }
    else if( h->mb.i_type == I_8x8 )
    {
        ALIGNED_ARRAY_16( pixel, edge,[33] );
        h->mb.b_transform_8x8 = 1;
        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i8x8_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i8x8_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i8x8_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i8x8_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i8x8_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( int i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ )
        {
            pixel *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE];
            int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
            h->predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] );

            if( h->mb.b_lossless )
                x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge );
            else
                h->predict_8x8[i_mode]( p_dst, edge );

            x264_mb_encode_i8x8( h, i, i_qp );
        }
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;
        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i4x4_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i4x4_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i4x4_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i4x4_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i4x4_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( int i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ )
        {
            pixel *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]];
            int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

            if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                /* emulate missing topright samples */
                MPIXEL_X4( &p_dst[4-FDEC_STRIDE] ) = PIXEL_SPLAT_X4( p_dst[3-FDEC_STRIDE] );

            if( h->mb.b_lossless )
                x264_predict_lossless_4x4( h, p_dst, i, i_mode );
            else
                h->predict_4x4[i_mode]( p_dst );
            x264_mb_encode_i4x4( h, i, i_qp );
        }
    }
    else    /* Inter MB */
    {
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless )
        {
            if( h->mb.b_transform_8x8 )
                for( int i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int x = i8x8&1;
                    int y = i8x8>>1;
                    int s8 = X264_SCAN8_0 + 2*x + 16*y;

                    nz = h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8], h->mb.pic.p_fenc[0] + 8*x + 8*y*FENC_STRIDE,
                                                                   h->mb.pic.p_fdec[0] + 8*x + 8*y*FDEC_STRIDE );
                    STORE_8x8_NNZ( s8, nz );
                    h->mb.i_cbp_luma |= nz << i8x8;
                }
            else
                for( int i4x4 = 0; i4x4 < 16; i4x4++ )
                {
                    nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4],
                                        h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4],
                                        h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] );
                    h->mb.cache.non_zero_count[x264_scan8[i4x4]] = nz;
                    h->mb.i_cbp_luma |= nz << (i4x4>>2);
                }
        }
        else if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( dctcoef, dct8x8,[4],[64] );
            b_decimate &= !h->mb.b_trellis || !h->param.b_cabac; // 8x8 trellis is inherently optimal decimation for CABAC
            h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[1] += h->mb.b_noise_reduction * 4;

            for( int idx = 0; idx < 4; idx++ )
            {
                nz = x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx );

                if( nz )
                {
                    h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] );
                    if( b_decimate )
                    {
                        int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] );
                        i_decimate_mb += i_decimate_8x8;
                        if( i_decimate_8x8 >= 4 )
                            h->mb.i_cbp_luma |= 1<<idx;
                    }
                    else
                        h->mb.i_cbp_luma |= 1<<idx;
                }
            }

            if( i_decimate_mb < 6 && b_decimate )
            {
                h->mb.i_cbp_luma = 0;
                CLEAR_16x16_NNZ
            }
            else
            {
                for( int idx = 0; idx < 4; idx++ )
                {
                    int x = idx&1;
                    int y = idx>>1;
                    int s8 = X264_SCAN8_0 + 2*x + 16*y;

                    if( h->mb.i_cbp_luma&(1<<idx) )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][8*x + 8*y*FDEC_STRIDE], dct8x8[idx] );
                        STORE_8x8_NNZ( s8, 1 );
                    }
                    else
                        STORE_8x8_NNZ( s8, 0 );
                }
            }
        }
        else
        {
            ALIGNED_ARRAY_16( dctcoef, dct4x4,[16],[16] );
            h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[0] += h->mb.b_noise_reduction * 16;

            for( int i8x8 = 0; i8x8 < 4; i8x8++ )
            {
                int i_decimate_8x8 = 0;
                int cbp = 0;

                /* encode one 4x4 block */
                for( int i4x4 = 0; i4x4 < 4; i4x4++ )
                {
                    int idx = i8x8 * 4 + i4x4;

                    nz = x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx );
                    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;

                    if( nz )
                    {
                        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] );
                        h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[CQM_4PY], i_qp );
                        if( b_decimate && i_decimate_8x8 < 6 )
                            i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] );
                        cbp = 1;
                    }
                }

                int x = i8x8&1;
                int y = i8x8>>1;

                /* decimate this 8x8 block */
                i_decimate_mb += i_decimate_8x8;
                if( b_decimate )
                {
                    if( i_decimate_8x8 < 4 )
                    {
                        int s8 = X264_SCAN8_0 + 2*x + 16*y;
                        STORE_8x8_NNZ( s8, 0 );
                    }
                    else
                        h->mb.i_cbp_luma |= 1<<i8x8;
                }
                else if( cbp )
                {
                    h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][8*x + 8*y*FDEC_STRIDE], &dct4x4[i8x8*4] );
                    h->mb.i_cbp_luma |= 1<<i8x8;
                }
            }

            if( b_decimate )
            {
                if( i_decimate_mb < 6 )
                {
                    h->mb.i_cbp_luma = 0;
                    CLEAR_16x16_NNZ
                }
                else
                {
                    for( int i8x8 = 0; i8x8 < 4; i8x8++ )
                        if( h->mb.i_cbp_luma&(1<<i8x8) )
                            h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                }
            }
        }
    }

    /* encode chroma */
    if( IS_INTRA( h->mb.i_type ) )
    {
        const int i_mode = h->mb.i_chroma_pred_mode;
        if( h->mb.b_lossless )
            x264_predict_lossless_8x8_chroma( h, i_mode );
        else
        {
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
        }
    }

    /* encode the 8x8 blocks */
    x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );

    /* store cbp */
    int cbp = h->mb.i_cbp_chroma << 4 | h->mb.i_cbp_luma;
    if( h->param.b_cabac )
        cbp |= h->mb.cache.non_zero_count[x264_scan8[24]] << 8
            |  h->mb.cache.non_zero_count[x264_scan8[25]] << 9
            |  h->mb.cache.non_zero_count[x264_scan8[26]] << 10;
    h->mb.cbp[h->mb.i_mb_xy] = cbp;

    /* Check for P_SKIP
     * XXX: in the me perhaps we should take x264_mb_predict_mv_pskip into account
     *      (if multiple mv give same result)*/
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            M32( h->mb.cache.mv[0][x264_scan8[0]] ) == M32( h->mb.cache.pskip_mv )
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}

/*****************************************************************************
 * x264_macroblock_probe_skip:
 *  Check if the current MB could be encoded as a [PB]_SKIP
 *****************************************************************************/
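/* The luma residual is transformed and quantized 8x8 at a time while accumulating a
 * decimation score; once the score reaches 6 the macroblock clearly can't be skipped
 * and we return early.  Chroma is checked last, gated by cheap SSD thresholds, since
 * it almost never causes the rejection. */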
int x264_macroblock_probe_skip( x264_t *h, int b_bidir )
{
    ALIGNED_ARRAY_16( dctcoef, dct4x4,[4],[16] );
    ALIGNED_ARRAY_16( dctcoef, dct2x2,[4] );
    ALIGNED_ARRAY_16( dctcoef, dctscan,[16] );
    ALIGNED_4( int16_t mvp[2] );

    int i_qp = h->mb.i_qp;
    int thresh, ssd;

    if( !b_bidir )
    {
        /* Get the MV */
        mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
        mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );

        /* Motion compensation */
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvp[0], mvp[1], 16, 16, &h->sh.weight[0][0] );
    }

    for( int i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
    {
        int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
        int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;
        /* get luma diff */
        h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset,
                                    h->mb.pic.p_fdec[0] + fdec_offset );
        /* encode one 4x4 block */
        for( int i4x4 = 0; i4x4 < 4; i4x4++ )
        {
            if( h->mb.b_noise_reduction )
                h->quantf.denoise_dct( dct4x4[i4x4], h->nr_residual_sum[0], h->nr_offset[0], 16 );
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score16( dctscan );
            if( i_decimate_mb >= 6 )
                return 0;
        }
    }

    /* encode chroma */
    i_qp = h->mb.i_chroma_qp;
    thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;

    if( !b_bidir )
    {
        /* Special case for mv0, which is (of course) very common in P-skip mode. */
        if( M32( mvp ) )
            h->mc.mc_chroma( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                             mvp[0], mvp[1], 8, 8 );
        else
            h->mc.load_deinterleave_8x8x2_fdec( h->mb.pic.p_fdec[1], h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1] );
    }

    for( int ch = 0; ch < 2; ch++ )
    {
        pixel *p_src = h->mb.pic.p_fenc[1+ch];
        pixel *p_dst = h->mb.pic.p_fdec[1+ch];

        if( !b_bidir && h->sh.weight[0][1+ch].weightfn )
            h->sh.weight[0][1+ch].weightfn[8>>2]( h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                  h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                  &h->sh.weight[0][1+ch], 8 );

        /* there is almost never a termination during chroma, but we can't avoid the check entirely */
        /* so instead we check SSD and skip the actual check if the score is low enough. */
        ssd = h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE );
        if( ssd < thresh )
            continue;

        /* The vast majority of chroma checks will terminate during the DC check or the higher
         * threshold check, so we can save time by doing a DC-only DCT. */
        if( h->mb.b_noise_reduction )
        {
            h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
            for( int i4x4 = 0; i4x4 < 4; i4x4++ )
            {
                h->quantf.denoise_dct( dct4x4[i4x4], h->nr_residual_sum[2], h->nr_offset[2], 16 );
                dct2x2[i4x4] = dct4x4[i4x4][0];
            }
        }
        else
            h->dctf.sub8x8_dct_dc( dct2x2, p_src, p_dst );

        if( h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 ) )
            return 0;

        /* If there wasn't a termination in DC, we can check against a much higher threshold. */
        if( ssd < thresh*4 )
            continue;

        if( !h->mb.b_noise_reduction )
            h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );

        /* calculate dct coeffs */
        for( int i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ )
        {
            dct4x4[i4x4][0] = 0;
            if( h->mb.b_noise_reduction )
                h->quantf.denoise_dct( dct4x4[i4x4], h->nr_residual_sum[2], h->nr_offset[2], 16 );
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score15( dctscan );
            if( i_decimate_mb >= 7 )
                return 0;
        }
    }

    h->mb.b_skip_mc = 1;
    return 1;
}

/****************************************************************************
 * DCT-domain noise reduction / adaptive deadzone
 * from libavcodec
 ****************************************************************************/
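/* nr_residual_sum[cat][i] accumulates the magnitude seen at each coefficient
 * position and nr_count[cat] the number of blocks contributing.  The offset applied
 * before quantization is roughly strength * count / (residual_sum * weight/256), so
 * positions whose coefficients are consistently tiny (i.e. probably noise) get a
 * larger dead zone. */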

void x264_noise_reduction_update( x264_t *h )
{
    h->nr_offset = h->nr_offset_denoise;
    h->nr_residual_sum = h->nr_residual_sum_buf[0];
    h->nr_count = h->nr_count_buf[0];
    for( int cat = 0; cat < 3; cat++ )
    {
        int dct8x8 = cat == 1;
        int size = dct8x8 ? 64 : 16;
        const uint16_t *weight = dct8x8 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;

        if( h->nr_count[cat] > (dct8x8 ? (1<<16) : (1<<18)) )
        {
            for( int i = 0; i < size; i++ )
                h->nr_residual_sum[cat][i] >>= 1;
            h->nr_count[cat] >>= 1;
        }

        for( int i = 0; i < size; i++ )
            h->nr_offset[cat][i] =
                ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
                 + h->nr_residual_sum[cat][i]/2)
              / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);

        /* Don't denoise DC coefficients */
        h->nr_offset[cat][0] = 0;
    }
}

/*****************************************************************************
 * RD only; 4 calls to this do not make up for one macroblock_encode.
 * doesn't transform chroma dc.
 *****************************************************************************/
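/* Presumably called from the RD refinement paths to re-encode the residual of a
 * single 8x8 partition in place; since chroma DC spans the whole macroblock, it is
 * left untransformed here (see the note above). */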
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
    int i_qp = h->mb.i_qp;
    int x = i8&1;
    int y = i8>>1;
    int s8 = X264_SCAN8_0 + 2*x + 16*y;
    pixel *p_fenc = h->mb.pic.p_fenc[0] + 8*x + 8*y*FENC_STRIDE;
    pixel *p_fdec = h->mb.pic.p_fdec[0] + 8*x + 8*y*FDEC_STRIDE;