;*****************************************************************************
;* dct-a.asm: h264 encoder library
;*****************************************************************************
;* Copyright (C) 2003-2008 x264 project
;*
;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
;*          Loren Merritt <lorenm@u.washington.edu>
;*          Min Chen <chenm001@163.com>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
;*****************************************************************************

%include "x86inc.asm"

SECTION_RODATA
pw_1:  times 8 dw 1  ; rounding term for the (x+1)>>1 in dct4x4dc
pw_32: times 8 dw 32 ; rounding term for the idct's final >>6
pb_zigzag4: db 0,1,4,8,5,2,3,6,9,12,13,10,7,11,14,15

SECTION .text

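; LOAD_DIFF_4P dst, tmp, junk, [pix1], [pix2]
; loads 4 pixels from each source and leaves their difference in dst as
; words. %3 doesn't need to be zeroed: both unpacks interleave the same
; high bytes, which cancel exactly in the word subtraction.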
%macro LOAD_DIFF_4P 5
    movh        %1, %4
    punpcklbw   %1, %3
    movh        %2, %5
    punpcklbw   %2, %3
    psubw       %1, %2
%endmacro

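; SUMSUB_BA a, b: in-place butterfly, a = a+b, b = b-a (no temporary needed)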
%macro SUMSUB_BA 2
    paddw   %1, %2
    paddw   %2, %2
    psubw   %2, %1
%endmacro

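; SUMSUB_BADC: SUMSUB_BA on two pairs at once, interleaved for scheduling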
%macro SUMSUB_BADC 4
    paddw   %1, %2
    paddw   %3, %4
    paddw   %2, %2
    paddw   %4, %4
    psubw   %2, %1
    psubw   %4, %3
%endmacro

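; SUMSUB2_AB a, b, tmp: the forward transform's scaled butterfly,
; a = 2a+b, tmp = a-2b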
%macro SUMSUB2_AB 3
    mova    %3, %1
    paddw   %1, %1
    paddw   %1, %2
    psubw   %3, %2
    psubw   %3, %2
%endmacro

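; SUMSUBD2_AB a, b, tmp1, tmp2: the inverse transform's halved butterfly,
; a = a+(b>>1), tmp2 = (a>>1)-b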
%macro SUMSUBD2_AB 4
    mova    %4, %1
    mova    %3, %2
    psraw   %2, 1
    psraw   %4, 1
    paddw   %1, %2
    psubw   %4, %3
%endmacro

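; SBUTTERFLY type, a, b, tmp: interleave a and b at word/dword/qword
; granularity, low half into a, high half into b (one transpose step)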
%macro SBUTTERFLY 4
    mova       m%4, m%2
    punpckl%1  m%2, m%3
    punpckh%1  m%4, m%3
    SWAP %3, %4
%endmacro

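; TRANSPOSE4x4W: transpose a 4x4 matrix of words held in four registers,
; using the fifth as scratch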
%macro TRANSPOSE4x4W 5
    SBUTTERFLY wd, %1, %2, %5
    SBUTTERFLY wd, %3, %4, %5
    SBUTTERFLY dq, %1, %3, %5
    SBUTTERFLY dq, %2, %4, %5
    SWAP %2, %3
%endmacro

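; TRANSPOSE2x4x4W: transpose two 4x4 word matrices at once, one packed in
; each half of four xmm registers; the qdq stage keeps the halves separate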
%macro TRANSPOSE2x4x4W 5
    SBUTTERFLY wd, %1, %2, %5
    SBUTTERFLY wd, %3, %4, %5
    SBUTTERFLY dq, %1, %3, %5
    SBUTTERFLY dq, %2, %4, %5
    SBUTTERFLY qdq, %1, %2, %5
    SBUTTERFLY qdq, %3, %4, %5
%endmacro

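; STORE_DIFF_4P dct, tmp, zero, [dst]: scale the residual down by 6 bits,
; add it to 4 predicted pixels, clip to 0..255 and store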
%macro STORE_DIFF_4P 4
    psraw       %1, 6
    movh        %2, %4
    punpcklbw   %2, %3
    paddsw      %1, %2
    packuswb    %1, %1
    movh        %4, %1
%endmacro

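; HADAMARD4_1D: 4-point Hadamard transform, two levels of butterflies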
%macro HADAMARD4_1D 4
    SUMSUB_BADC m%2, m%1, m%4, m%3
    SUMSUB_BADC m%4, m%2, m%3, m%1
    SWAP %1, %4, %3
%endmacro

;-----------------------------------------------------------------------------
; void x264_dct4x4dc_mmx( int16_t d[4][4] )
;-----------------------------------------------------------------------------
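; hadamard on rows, transpose, hadamard on columns, normalize by (x+1)>>1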
cglobal x264_dct4x4dc_mmx, 1,1,1
    movq   m0, [r0+ 0]
    movq   m1, [r0+ 8]
    movq   m2, [r0+16]
    movq   m3, [r0+24]
    HADAMARD4_1D  0,1,2,3
    TRANSPOSE4x4W 0,1,2,3,4
    HADAMARD4_1D  0,1,2,3
    movq   m6, [pw_1 GLOBAL]
    paddw  m0, m6
    paddw  m1, m6
    paddw  m2, m6
    paddw  m3, m6
    psraw  m0, 1
    psraw  m1, 1
    psraw  m2, 1
    psraw  m3, 1
    movq  [r0+0], m0
    movq  [r0+8], m1
    movq [r0+16], m2
    movq [r0+24], m3
    RET

;-----------------------------------------------------------------------------
; void x264_idct4x4dc_mmx( int16_t d[4][4] )
;-----------------------------------------------------------------------------
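; same hadamard as the forward dc transform, minus the rounding shift
; (that scaling is applied during dequant instead)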
cglobal x264_idct4x4dc_mmx, 1,1
    movq  m0, [r0+ 0]
    movq  m1, [r0+ 8]
    movq  m2, [r0+16]
    movq  m3, [r0+24]
    HADAMARD4_1D  0,1,2,3
    TRANSPOSE4x4W 0,1,2,3,4
    HADAMARD4_1D  0,1,2,3
    movq  [r0+ 0], m0
    movq  [r0+ 8], m1
    movq  [r0+16], m2
    movq  [r0+24], m3
    RET

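; DCT4_1D: one pass of the 4-point H.264 forward transform:
;   e = a+d  f = b+c  g = a-d  h = b-c
;   out = { e+f, 2g+h, e-f, g-2h }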
%macro DCT4_1D 5
    SUMSUB_BADC m%4, m%1, m%3, m%2
    SUMSUB_BA   m%3, m%4
    SUMSUB2_AB  m%1, m%2, m%5
    SWAP %1, %3, %4, %5, %2
%endmacro

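; IDCT4_1D: one pass of the 4-point H.264 inverse transform:
;   e0 = a+c  e1 = a-c  e2 = (b>>1)-d  e3 = b+(d>>1)
;   out = { e0+e3, e1+e2, e1-e2, e0-e3 }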
%macro IDCT4_1D 6
    SUMSUB_BA   m%3, m%1
    SUMSUBD2_AB m%2, m%4, m%6, m%5
    SUMSUB_BADC m%2, m%3, m%5, m%1
    SWAP %1, %2, %5, %4, %3
%endmacro

;-----------------------------------------------------------------------------
; void x264_sub4x4_dct_mmx( int16_t dct[4][4], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
cglobal x264_sub4x4_dct_mmx, 3,3
.skip_prologue:
%macro SUB_DCT4 1
    LOAD_DIFF_4P  m0, m6, m7, [r1+0*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF_4P  m1, m6, m7, [r1+1*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    LOAD_DIFF_4P  m2, m6, m7, [r1+2*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF_4P  m3, m6, m7, [r1+3*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    DCT4_1D 0,1,2,3,4
    TRANSPOSE%1 0,1,2,3,4
    DCT4_1D 0,1,2,3,4
    movq  [r0+ 0], m0
    movq  [r0+ 8], m1
    movq  [r0+16], m2
    movq  [r0+24], m3
%endmacro
    SUB_DCT4 4x4W
    RET

;-----------------------------------------------------------------------------
; void x264_add4x4_idct_mmx( uint8_t *p_dst, int16_t dct[4][4] )
;-----------------------------------------------------------------------------
cglobal x264_add4x4_idct_mmx, 2,2,1
.skip_prologue:
    movq  m0, [r1+ 0]
    movq  m1, [r1+ 8]
    movq  m2, [r1+16]
    movq  m3, [r1+24]
%macro ADD_IDCT4 1
    IDCT4_1D 0,1,2,3,4,5
    TRANSPOSE%1 0,1,2,3,4
    paddw m0, [pw_32 GLOBAL]
    IDCT4_1D 0,1,2,3,4,5
    pxor  m7, m7
    STORE_DIFF_4P  m0, m4, m7, [r0+0*FDEC_STRIDE]
    STORE_DIFF_4P  m1, m4, m7, [r0+1*FDEC_STRIDE]
    STORE_DIFF_4P  m2, m4, m7, [r0+2*FDEC_STRIDE]
    STORE_DIFF_4P  m3, m4, m7, [r0+3*FDEC_STRIDE]
%endmacro
    ADD_IDCT4 4x4W
    RET

INIT_XMM

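;-----------------------------------------------------------------------------
; void x264_sub8x8_dct_sse2( int16_t dct[4][4][4], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
; each call to .8x4 transforms two 4x4 blocks in parallel, one per xmm half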
cglobal x264_sub8x8_dct_sse2, 3,3
.skip_prologue:
    call .8x4
    add  r0, 64
    add  r1, 4*FENC_STRIDE
    add  r2, 4*FDEC_STRIDE
.8x4:
    SUB_DCT4 2x4x4W
    movhps [r0+32], m0
    movhps [r0+40], m1
    movhps [r0+48], m2
    movhps [r0+56], m3
    ret

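;-----------------------------------------------------------------------------
; void x264_add8x8_idct_sse2( uint8_t *pix, int16_t dct[4][4][4] )
;-----------------------------------------------------------------------------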
cglobal x264_add8x8_idct_sse2, 2,2,1
.skip_prologue:
    call .8x4
    add  r1, 64
    add  r0, 4*FDEC_STRIDE
.8x4:
    movq   m0, [r1+ 0]
    movq   m1, [r1+ 8]
    movq   m2, [r1+16]
    movq   m3, [r1+24]
    movhps m0, [r1+32]
    movhps m1, [r1+40]
    movhps m2, [r1+48]
    movhps m3, [r1+56]
    ADD_IDCT4 2x4x4W
    ret

;-----------------------------------------------------------------------------
; void x264_sub8x8_dct_mmx( int16_t dct[4][4][4], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
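; %1=name  %2=4x4/8x8 primitive to call  %3=dct bytes written per call
; %4-%6 encode the pixel pointer steps between the four sub-blocks,
; correcting for any stepping the callee has already done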
%macro SUB_NxN_DCT 6
cglobal %1, 3,3
.skip_prologue:
    call %2
    add  r0, %3
    add  r1, %4-%5-%6*FENC_STRIDE
    add  r2, %4-%5-%6*FDEC_STRIDE
    call %2
    add  r0, %3
    add  r1, (%4-%6)*FENC_STRIDE-%5-%4
    add  r2, (%4-%6)*FDEC_STRIDE-%5-%4
    call %2
    add  r0, %3
    add  r1, %4-%5-%6*FENC_STRIDE
    add  r2, %4-%5-%6*FDEC_STRIDE
    jmp  %2
%endmacro

;-----------------------------------------------------------------------------
; void x264_add8x8_idct_mmx( uint8_t *pix, int16_t dct[4][4][4] )
;-----------------------------------------------------------------------------
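; same parameter scheme as SUB_NxN_DCT, with a single pixel pointer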
%macro ADD_NxN_IDCT 6
cglobal %1, 2,2,1
.skip_prologue:
    call %2
    add  r0, %4-%5-%6*FDEC_STRIDE
    add  r1, %3
    call %2
    add  r0, (%4-%6)*FDEC_STRIDE-%5-%4
    add  r1, %3
    call %2
    add  r0, %4-%5-%6*FDEC_STRIDE
    add  r1, %3
    jmp  %2
%endmacro

%ifndef ARCH_X86_64
SUB_NxN_DCT  x264_sub8x8_dct_mmx,    x264_sub4x4_dct_mmx  %+ .skip_prologue, 32, 4, 0, 0
ADD_NxN_IDCT x264_add8x8_idct_mmx,   x264_add4x4_idct_mmx %+ .skip_prologue, 32, 4, 0, 0
SUB_NxN_DCT  x264_sub16x16_dct_mmx,  x264_sub8x8_dct_mmx  %+ .skip_prologue, 32, 8, 4, 4
ADD_NxN_IDCT x264_add16x16_idct_mmx, x264_add8x8_idct_mmx %+ .skip_prologue, 32, 8, 4, 4

cextern x264_sub8x8_dct8_mmx.skip_prologue
cextern x264_add8x8_idct8_mmx.skip_prologue
SUB_NxN_DCT  x264_sub16x16_dct8_mmx,  x264_sub8x8_dct8_mmx  %+ .skip_prologue, 128, 8, 0, 0
ADD_NxN_IDCT x264_add16x16_idct8_mmx, x264_add8x8_idct8_mmx %+ .skip_prologue, 128, 8, 0, 0
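; on x86_32, enter the sse2 dct8 primitives at .skip_prologue: the NxN
; wrapper already has the arguments in the right registers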
%define x264_sub8x8_dct8_sse2 x264_sub8x8_dct8_sse2.skip_prologue
%define x264_add8x8_idct8_sse2 x264_add8x8_idct8_sse2.skip_prologue
%endif

SUB_NxN_DCT  x264_sub16x16_dct_sse2,  x264_sub8x8_dct_sse2  %+ .skip_prologue, 64, 8, 0, 4
ADD_NxN_IDCT x264_add16x16_idct_sse2, x264_add8x8_idct_sse2 %+ .skip_prologue, 64, 8, 0, 4

cextern x264_sub8x8_dct8_sse2
cextern x264_add8x8_idct8_sse2
SUB_NxN_DCT  x264_sub16x16_dct8_sse2,  x264_sub8x8_dct8_sse2,  128, 8, 0, 0
ADD_NxN_IDCT x264_add16x16_idct8_sse2, x264_add8x8_idct8_sse2, 128, 8, 0, 0


;-----------------------------------------------------------------------------
; void x264_zigzag_scan_4x4_field_mmxext( int16_t level[16], int16_t dct[4][4] )
;-----------------------------------------------------------------------------
; sse2 is only 1 cycle faster, and ssse3/pshufb is slower on core2
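; only output elements 2-5 are permuted (they take coefficients 4,2,3,5);
; the rest of the matrix is copied through unchanged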
cglobal x264_zigzag_scan_4x4_field_mmxext, 2,3
    pshufw     mm0, [r1+4], 0xd2
    movq       mm1, [r1+16]
    movq       mm2, [r1+24]
    movq    [r0+4], mm0
    movq   [r0+16], mm1
    movq   [r0+24], mm2
    mov        r2d, [r1]
    mov       [r0], r2d
    mov        r2d, [r1+12]
    mov    [r0+12], r2d
    RET

;-----------------------------------------------------------------------------
; void x264_zigzag_sub_4x4_frame_ssse3( int16_t level[16], const uint8_t *src, uint8_t *dst )
;-----------------------------------------------------------------------------
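; copies the source pixels into dst, zigzags both blocks while they are
; still packed as bytes (one pshufb each), then widens and subtracts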
cglobal x264_zigzag_sub_4x4_frame_ssse3, 3,3
    movd      xmm0, [r1+0*FENC_STRIDE]
    movd      xmm1, [r1+1*FENC_STRIDE]
    movd      xmm2, [r1+2*FENC_STRIDE]
    movd      xmm3, [r1+3*FENC_STRIDE]
    movd      xmm4, [r2+0*FDEC_STRIDE]
    movd      xmm5, [r2+1*FDEC_STRIDE]
    movd      xmm6, [r2+2*FDEC_STRIDE]
    movd      xmm7, [r2+3*FDEC_STRIDE]
    movd      [r2+0*FDEC_STRIDE], xmm0
    movd      [r2+1*FDEC_STRIDE], xmm1
    movd      [r2+2*FDEC_STRIDE], xmm2
    movd      [r2+3*FDEC_STRIDE], xmm3
    picgetgot r1
    punpckldq xmm0, xmm1
    punpckldq xmm2, xmm3
    punpckldq xmm4, xmm5
    punpckldq xmm6, xmm7
    movlhps   xmm0, xmm2
    movlhps   xmm4, xmm6
    movdqa    xmm7, [pb_zigzag4 GLOBAL]
    pshufb    xmm0, xmm7
    pshufb    xmm4, xmm7
    pxor      xmm6, xmm6
    movdqa    xmm1, xmm0
    movdqa    xmm5, xmm4
    punpcklbw xmm0, xmm6
    punpckhbw xmm1, xmm6
    punpcklbw xmm4, xmm6
    punpckhbw xmm5, xmm6
    psubw     xmm0, xmm4
    psubw     xmm1, xmm5
    movdqa    [r0], xmm0
    movdqa [r0+16], xmm1
    RET