;*****************************************************************************
;* dct-a.asm: h264 encoder library
;*****************************************************************************
;* Copyright (C) 2003-2008 x264 project
;*
;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
;*          Loren Merritt <lorenm@u.washington.edu>
;*          Min Chen <chenm001.163.com>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
;*****************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION_RODATA
; Rounding/scan constants used by the transforms below.
pw_1:  times 8 dw 1                     ; word bias 1 (for the (x+1)>>1 in dct4x4dc)
pw_32: times 8 dw 32                    ; word bias 32 (folded into the idct's first row)
pb_zigzag4: db 0,1,4,8,5,2,3,6,9,12,13,10,7,11,14,15 ; 4x4 frame zigzag scan order

SECTION .text

; 1-D 4-point Hadamard transform across four registers.
; %1-%4: data register numbers; results are left back in m%1..m%4.
%macro HADAMARD4_1D 4
    SUMSUB_BADC m%2, m%1, m%4, m%3
    SUMSUB_BADC m%4, m%2, m%3, m%1
    SWAP %1, %4, %3                     ; restore the expected output ordering
%endmacro

;-----------------------------------------------------------------------------
; void x264_dct4x4dc_mmx( int16_t d[4][4] )
; In-place 2-D Hadamard transform of the 4x4 dc block, with (x+1)>>1 scaling.
;-----------------------------------------------------------------------------
cglobal x264_dct4x4dc_mmx, 1,1
    movq   m0, [r0+ 0]                  ; load the four rows of the block
    movq   m1, [r0+ 8]
    movq   m2, [r0+16]
    movq   m3, [r0+24]
    HADAMARD4_1D  0,1,2,3               ; transform one dimension
    TRANSPOSE4x4W 0,1,2,3,4
    HADAMARD4_1D  0,1,2,3               ; then the other
    movq   m6, [pw_1 GLOBAL]
    paddw  m0, m6                       ; round: out = (x+1)>>1
    paddw  m1, m6
    paddw  m2, m6
    paddw  m3, m6
    psraw  m0, 1
    psraw  m1, 1
    psraw  m2, 1
    psraw  m3, 1
    movq  [r0+0], m0                    ; store back in place
    movq  [r0+8], m1
    movq [r0+16], m2
    movq [r0+24], m3
    RET

;-----------------------------------------------------------------------------
; void x264_idct4x4dc_mmx( int16_t d[4][4] )
; In-place inverse 2-D Hadamard transform of the 4x4 dc block (no scaling).
;-----------------------------------------------------------------------------
cglobal x264_idct4x4dc_mmx, 1,1
    movq  m0, [r0+ 0]                   ; load the four rows of the block
    movq  m1, [r0+ 8]
    movq  m2, [r0+16]
    movq  m3, [r0+24]
    HADAMARD4_1D  0,1,2,3               ; transform one dimension
    TRANSPOSE4x4W 0,1,2,3,4
    HADAMARD4_1D  0,1,2,3               ; then the other
    movq  [r0+ 0], m0                   ; store back in place
    movq  [r0+ 8], m1
    movq  [r0+16], m2
    movq  [r0+24], m3
    RET
; 1-D 4-point DCT stage across four registers.
; %1-%4: data register numbers, %5: scratch register number.
%macro DCT4_1D 5
    SUMSUB_BADC m%4, m%1, m%3, m%2
    SUMSUB_BA   m%3, m%4
    SUMSUB2_AB  m%1, m%2, m%5
    SWAP %1, %3, %4, %5, %2             ; restore the expected output ordering
%endmacro

; 1-D 4-point inverse DCT stage across four registers.
; %1-%4: data register numbers, %5-%6: scratch register numbers.
%macro IDCT4_1D 6
    SUMSUB_BA   m%3, m%1
    SUMSUBD2_AB m%2, m%4, m%6, m%5
    SUMSUB_BADC m%2, m%3, m%5, m%1
    SWAP %1, %2, %5, %4, %3             ; restore the expected output ordering
%endmacro

;-----------------------------------------------------------------------------
; void x264_sub4x4_dct_mmx( int16_t dct[4][4], uint8_t *pix1, uint8_t *pix2 )
; dct = 2-D DCT of (pix1 - pix2) over a 4x4 block.
;-----------------------------------------------------------------------------
cglobal x264_sub4x4_dct_mmx, 3,3
.skip_prologue:
; Transform body shared with the sse2 8x8 version below;
; %1 selects the transpose variant (4x4W or 2x4x4W).
%macro SUB_DCT4 1
    LOAD_DIFF  m0, m6, m7, [r1+0*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF  m1, m6, m7, [r1+1*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    LOAD_DIFF  m2, m6, m7, [r1+2*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF  m3, m6, m7, [r1+3*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    DCT4_1D 0,1,2,3,4                   ; transform one dimension
    TRANSPOSE%1 0,1,2,3,4
    DCT4_1D 0,1,2,3,4                   ; then the other
    movq  [r0+ 0], m0                   ; store coefficients
    movq  [r0+ 8], m1
    movq  [r0+16], m2
    movq  [r0+24], m3
%endmacro
    SUB_DCT4 4x4W
    RET

;-----------------------------------------------------------------------------
; void x264_add4x4_idct_mmx( uint8_t *p_dst, int16_t dct[4][4] )
; p_dst += idct(dct) over a 4x4 block, with saturation to 8 bits.
;-----------------------------------------------------------------------------
cglobal x264_add4x4_idct_mmx, 2,2
.skip_prologue:
    movq  m0, [r1+ 0]                   ; load the four rows of coefficients
    movq  m1, [r1+ 8]
    movq  m2, [r1+16]
    movq  m3, [r1+24]
; Transform body shared with the sse2 8x8 version below;
; %1 selects the transpose variant (4x4W or 2x4x4W).
%macro ADD_IDCT4 1
    IDCT4_1D 0,1,2,3,4,5                ; inverse transform one dimension
    TRANSPOSE%1 0,1,2,3,4
    paddw m0, [pw_32 GLOBAL]            ; rounding bias, folded into the first row
    IDCT4_1D 0,1,2,3,4,5                ; then the other
    pxor  m7, m7                        ; zero reg for STORE_DIFF's unpack
    STORE_DIFF  m0, m4, m7, [r0+0*FDEC_STRIDE]
    STORE_DIFF  m1, m4, m7, [r0+1*FDEC_STRIDE]
    STORE_DIFF  m2, m4, m7, [r0+2*FDEC_STRIDE]
    STORE_DIFF  m3, m4, m7, [r0+3*FDEC_STRIDE]
%endmacro
    ADD_IDCT4 4x4W
    RET

INIT_XMM

;-----------------------------------------------------------------------------
; void x264_sub8x8_dct_sse2( int16_t dct[4][4][4], uint8_t *pix1, uint8_t *pix2 )
; Four 4x4 sub-dcts done as two 8x4 passes, two blocks per xmm register.
;-----------------------------------------------------------------------------
cglobal x264_sub8x8_dct_sse2, 3,3
.skip_prologue:
    call .8x4                           ; top 8x4 half
    add  r0, 64                         ; advance past two 4x4 coefficient blocks
    add  r1, 4*FENC_STRIDE
    add  r2, 4*FDEC_STRIDE
.8x4:                                   ; falls through for the bottom half
    SUB_DCT4 2x4x4W                     ; stores the low halves at [r0+0..24]
    movhps [r0+32], m0                  ; high halves hold the second 4x4 block
    movhps [r0+40], m1
    movhps [r0+48], m2
    movhps [r0+56], m3
    ret

;-----------------------------------------------------------------------------
; void x264_add8x8_idct_sse2( uint8_t *p_dst, int16_t dct[4][4][4] )
; Four 4x4 sub-idcts done as two 8x4 passes, two blocks per xmm register.
;-----------------------------------------------------------------------------
cglobal x264_add8x8_idct_sse2, 2,2
.skip_prologue:
    call .8x4                           ; top 8x4 half
    add  r1, 64                         ; advance past two 4x4 coefficient blocks
    add  r0, 4*FDEC_STRIDE
.8x4:                                   ; falls through for the bottom half
    movq   m0, [r1+ 0]                  ; first 4x4 block in the low halves
    movq   m1, [r1+ 8]
    movq   m2, [r1+16]
    movq   m3, [r1+24]
    movhps m0, [r1+32]                  ; second 4x4 block in the high halves
    movhps m1, [r1+40]
    movhps m2, [r1+48]
    movhps m3, [r1+56]
    ADD_IDCT4 2x4x4W
    ret

;-----------------------------------------------------------------------------
; void x264_sub8x8_dct_mmx( int16_t dct[4][4][4], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
; Build an NxN sub-dct out of four calls to a quarter-size version.
; %1: function name, %2: quarter-size routine (entered past its prologue),
; %3: coefficient-array step in bytes per call,
; %4/%5/%6: pixel-pointer step parameters between sub-blocks.
%macro SUB_NxN_DCT 6
cglobal %1, 3,3
.skip_prologue:
    call %2                             ; top-left quarter
    add  r0, %3
    add  r1, %4-%5-%6*FENC_STRIDE       ; move right
    add  r2, %4-%5-%6*FDEC_STRIDE
    call %2                             ; top-right quarter
    add  r0, %3
    add  r1, (%4-%6)*FENC_STRIDE-%5-%4  ; move down-left
    add  r2, (%4-%6)*FDEC_STRIDE-%5-%4
    call %2                             ; bottom-left quarter
    add  r0, %3
    add  r1, %4-%5-%6*FENC_STRIDE       ; move right
    add  r2, %4-%5-%6*FDEC_STRIDE
    jmp  %2                             ; tail call: bottom-right quarter
%endmacro

;-----------------------------------------------------------------------------
; void x264_add8x8_idct_mmx( uint8_t *pix, int16_t dct[4][4][4] )
;-----------------------------------------------------------------------------
; Build an NxN add-idct out of four calls to a quarter-size version.
; %1: function name, %2: quarter-size routine (entered past its prologue),
; %3: coefficient-array step in bytes per call,
; %4/%5/%6: pixel-pointer step parameters between sub-blocks.
%macro ADD_NxN_IDCT 6
cglobal %1, 2,2
.skip_prologue:
    call %2                             ; top-left quarter
    add  r0, %4-%5-%6*FDEC_STRIDE       ; move right
    add  r1, %3
    call %2                             ; top-right quarter
    add  r0, (%4-%6)*FDEC_STRIDE-%5-%4  ; move down-left
    add  r1, %3
    call %2                             ; bottom-left quarter
    add  r0, %4-%5-%6*FDEC_STRIDE       ; move right
    add  r1, %3
    jmp  %2                             ; tail call: bottom-right quarter
%endmacro

; x86-32 only: mmx fallbacks (x86-64 always has sse2).
%ifndef ARCH_X86_64
SUB_NxN_DCT  x264_sub8x8_dct_mmx,    x264_sub4x4_dct_mmx  %+ .skip_prologue, 32, 4, 0, 0
ADD_NxN_IDCT x264_add8x8_idct_mmx,   x264_add4x4_idct_mmx %+ .skip_prologue, 32, 4, 0, 0
SUB_NxN_DCT  x264_sub16x16_dct_mmx,  x264_sub8x8_dct_mmx  %+ .skip_prologue, 32, 8, 4, 4
ADD_NxN_IDCT x264_add16x16_idct_mmx, x264_add8x8_idct_mmx %+ .skip_prologue, 32, 8, 4, 4

; 8x8-dct kernels live in another file.
cextern x264_sub8x8_dct8_mmx.skip_prologue
cextern x264_add8x8_idct8_mmx.skip_prologue
SUB_NxN_DCT  x264_sub16x16_dct8_mmx,  x264_sub8x8_dct8_mmx  %+ .skip_prologue, 128, 8, 0, 0
ADD_NxN_IDCT x264_add16x16_idct8_mmx, x264_add8x8_idct8_mmx %+ .skip_prologue, 128, 8, 0, 0
; On x86-32 the sse2 dct8 kernels are entered past their prologues.
%define x264_sub8x8_dct8_sse2 x264_sub8x8_dct8_sse2.skip_prologue
%define x264_add8x8_idct8_sse2 x264_add8x8_idct8_sse2.skip_prologue
%endif

; sse2 16x16 wrappers around the 8x8 kernels above.
SUB_NxN_DCT  x264_sub16x16_dct_sse2,  x264_sub8x8_dct_sse2  %+ .skip_prologue, 64, 8, 0, 4
ADD_NxN_IDCT x264_add16x16_idct_sse2, x264_add8x8_idct_sse2 %+ .skip_prologue, 64, 8, 0, 4

; 8x8-dct sse2 kernels live in another file (renamed by the %defines above on x86-32).
cextern x264_sub8x8_dct8_sse2
cextern x264_add8x8_idct8_sse2
SUB_NxN_DCT  x264_sub16x16_dct8_sse2,  x264_sub8x8_dct8_sse2,  128, 8, 0, 0
ADD_NxN_IDCT x264_add16x16_idct8_sse2, x264_add8x8_idct8_sse2, 128, 8, 0, 0



;-----------------------------------------------------------------------------
; void x264_zigzag_scan_4x4_field_mmxext( int16_t level[16], int16_t dct[4][4] )
;-----------------------------------------------------------------------------
; sse2 is only 1 cycle faster, and ssse3/pshufb is slower on core2
cglobal x264_zigzag_scan_4x4_field_mmxext, 2,3
    pshufw     mm0, [r1+4], 0xd2        ; reorder coefficients 2..5 for the field scan
    movq       mm1, [r1+16]             ; coefficients 8..15 keep their order
    movq       mm2, [r1+24]
    movq    [r0+4], mm0
    movq   [r0+16], mm1
    movq   [r0+24], mm2
    mov        r2d, [r1]                ; coefficients 0,1 copied unchanged
    mov       [r0], r2d
    mov        r2d, [r1+12]             ; coefficients 6,7 copied unchanged
    mov    [r0+12], r2d
    RET

;-----------------------------------------------------------------------------
; void x264_zigzag_sub_4x4_frame_ssse3( int16_t level[16], const uint8_t *src, uint8_t *dst )
; level = zigzag(src - dst), and dst is overwritten with the src pixels.
;-----------------------------------------------------------------------------
cglobal x264_zigzag_sub_4x4_frame_ssse3, 3,3
    movd      xmm0, [r1+0*FENC_STRIDE]  ; four rows of source pixels
    movd      xmm1, [r1+1*FENC_STRIDE]
    movd      xmm2, [r1+2*FENC_STRIDE]
    movd      xmm3, [r1+3*FENC_STRIDE]
    movd      xmm4, [r2+0*FDEC_STRIDE]  ; four rows of prediction pixels
    movd      xmm5, [r2+1*FDEC_STRIDE]
    movd      xmm6, [r2+2*FDEC_STRIDE]
    movd      xmm7, [r2+3*FDEC_STRIDE]
    movd      [r2+0*FDEC_STRIDE], xmm0  ; copy source over the prediction
    movd      [r2+1*FDEC_STRIDE], xmm1
    movd      [r2+2*FDEC_STRIDE], xmm2
    movd      [r2+3*FDEC_STRIDE], xmm3
    punpckldq xmm0, xmm1                ; pack each 4x4 block into one register
    punpckldq xmm2, xmm3
    punpckldq xmm4, xmm5
    punpckldq xmm6, xmm7
    movlhps   xmm0, xmm2
    movlhps   xmm4, xmm6
    movdqa    xmm7, [pb_zigzag4 GLOBAL]
    pshufb    xmm0, xmm7                ; zigzag-reorder both blocks as bytes
    pshufb    xmm4, xmm7
    pxor      xmm6, xmm6
    movdqa    xmm1, xmm0
    movdqa    xmm5, xmm4
    punpcklbw xmm0, xmm6                ; widen u8 -> s16
    punpckhbw xmm1, xmm6
    punpcklbw xmm4, xmm6
    punpckhbw xmm5, xmm6
    psubw     xmm0, xmm4                ; level = src - pred, already in scan order
    psubw     xmm1, xmm5
    movdqa    [r0], xmm0
    movdqa [r0+16], xmm1
    RET