Commit bfbee860 authored and committed by Nathan Egge

x86: cdef: Add SIMD implementation of cdef_dir for 16bpc

Relative speed-ups over the C code (compiled with gcc-9.3.0):

                                           C       ASM   speed-up
cdef_dir_16bpc_avx2:                   534.2      72.5      7.36x
cdef_dir_16bpc_ssse3:                  534.2     104.8      5.10x
cdef_dir_16bpc_ssse3 (x86-32):         854.1     116.2      7.35x
parent ec95ea52
Pipeline #69819 passed in 4 minutes and 56 seconds
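
Both new functions wrap the existing 8bpc direction search instead of re-implementing it: the 16-bit samples are shifted down into 8-bit range, packed into an 8x8 byte block on the stack, and the 8bpc cdef_dir is then run on that copy. A rough C equivalent of the wrappers below (illustrative only; the function name, the function-pointer parameter and the pixel-unit stride are editorial, not part of the patch):

#include <stddef.h>
#include <stdint.h>

/* Sketch of the 16bpc wrapper: scale samples into 8-bit range, pack them
 * into a stride-8 byte block, then reuse the 8bpc direction search. */
void cdef_dir_16bpc_wrap(const uint16_t *src, ptrdiff_t stride, /* in pixels */
                         unsigned *var, int bitdepth_max,
                         void (*dir_8bpc)(const uint8_t *src,
                                          ptrdiff_t stride, unsigned *var))
{
    /* popcnt(bitdepth_max) is the bitdepth, so this is bitdepth - 8 */
    const int shift = __builtin_popcount(bitdepth_max) - 8;
    uint8_t buf[8 * 8];
    for (int y = 0; y < 8; y++)
        for (int x = 0; x < 8; x++)
            buf[y * 8 + x] = (uint8_t)(src[y * stride + x] >> shift);
    dir_8bpc(buf, 8, var); /* packed block has a stride of 8 bytes */
}

In the assembly the byte block lives in the function's stack frame above 32 bytes reserved as WIN64 shadow space, and the call goes straight to the 8bpc asm entry points (dav1d_cdef_dir_8bpc_avx2 / dav1d_cdef_dir_8bpc_ssse3).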
src/meson.build
@@ -184,20 +184,20 @@ if is_asm_enabled
libdav1d_sources_asm = files(
'x86/cpuid.asm',
'x86/msac.asm',
'x86/cdef_avx2.asm',
'x86/cdef_sse.asm',
)
if dav1d_bitdepths.contains('8')
libdav1d_sources_asm += files(
'x86/cdef_avx512.asm',
'x86/mc_avx512.asm',
'x86/cdef_avx2.asm',
'x86/mc_avx2.asm',
'x86/film_grain.asm',
'x86/ipred.asm',
'x86/itx.asm',
'x86/loopfilter.asm',
'x86/looprestoration.asm',
'x86/cdef_sse.asm',
'x86/film_grain_ssse3.asm',
'x86/ipred_ssse3.asm',
'x86/itx_ssse3.asm',
@@ -209,6 +209,8 @@ if is_asm_enabled
if dav1d_bitdepths.contains('16')
libdav1d_sources_asm += files(
'x86/cdef16_avx2.asm',
'x86/cdef16_sse.asm',
)
endif
src/x86/cdef16_avx2.asm (new file)
; Copyright (c) 2017-2021, The rav1e contributors
; Copyright (c) 2021, Nathan Egge
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions are met:
;
; 1. Redistributions of source code must retain the above copyright notice, this
; list of conditions and the following disclaimer.
;
; 2. Redistributions in binary form must reproduce the above copyright notice,
; this list of conditions and the following disclaimer in the documentation
; and/or other materials provided with the distribution.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
; ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
; WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
; ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
; (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
; ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
%include "config.asm"
%include "ext/x86/x86inc.asm"
%if ARCH_X86_64
SECTION .text
cextern cdef_dir_8bpc_avx2
INIT_YMM avx2
cglobal cdef_dir_16bpc, 4, 4, 3, 32 + 8*8, src, ss, var, bdmax
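; bdmax holds bitdepth_max ((1 << bitdepth) - 1), so popcnt yields the
; bitdepth and bitdepth - 8 is the right shift into 8-bit sample range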
popcnt bdmaxd, bdmaxd
movzx bdmaxq, bdmaxw
sub bdmaxq, 8
movq xm2, bdmaxq
DEFINE_ARGS src, ss, var, ss3
lea ss3q, [ssq*3]
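; build an 8x8 byte copy of the block at rsp + 32 (row stride 8): in two
; passes of four rows, shift the 16-bit samples right and pack to bytes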
mova xm0, [srcq + ssq*0]
mova xm1, [srcq + ssq*1]
vinserti128 m0, [srcq + ssq*2], 1
vinserti128 m1, [srcq + ss3q], 1
psraw m0, xm2
psraw m1, xm2
vpackuswb m0, m1
mova [rsp + 32 + 0*8], m0
lea srcq, [srcq + ssq*4]
mova xm0, [srcq + ssq*0]
mova xm1, [srcq + ssq*1]
vinserti128 m0, [srcq + ssq*2], 1
vinserti128 m1, [srcq + ss3q], 1
psraw m0, xm2
psraw m1, xm2
vpackuswb m0, m1
mova [rsp + 32 + 4*8], m0
lea srcq, [rsp + 32] ; WIN64 shadow space
mov ssq, 8
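; run the 8bpc direction search on the packed copy; the C cdef_find_dir
; likewise shifts samples down by bitdepth - 8 before searching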
call mangle(private_prefix %+ _cdef_dir_8bpc %+ SUFFIX)
RET
%endif ; ARCH_X86_64
src/x86/cdef16_sse.asm (new file)

; Copyright (c) 2017-2021, The rav1e contributors
; Copyright (c) 2021, Nathan Egge
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions are met:
;
; 1. Redistributions of source code must retain the above copyright notice, this
; list of conditions and the following disclaimer.
;
; 2. Redistributions in binary form must reproduce the above copyright notice,
; this list of conditions and the following disclaimer in the documentation
; and/or other materials provided with the distribution.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
; ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
; WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
; ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
; (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
; ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
%include "config.asm"
%include "ext/x86/x86inc.asm"
%ifn ARCH_X86_64
SECTION_RODATA 16
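; right-shift amounts into 8-bit range (2 for 10-bit, 4 for 12-bit input),
; indexed by the x86-32 path below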
pq_dir_shr: dq 2, 4
%endif
SECTION .text
cextern cdef_dir_8bpc_ssse3
INIT_XMM ssse3
cglobal cdef_dir_16bpc, 2, 4, 4, 32 + 8*8, src, ss, var, bdmax
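; bsr(bitdepth_max) is bitdepth - 1, so bsr - 7 is the right shift into
; 8-bit sample range (2 for 10-bit, 4 for 12-bit)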
bsr bdmaxd, bdmaxm
%if ARCH_X86_64
movzx bdmaxq, bdmaxw
sub bdmaxq, 7
movq m4, bdmaxq
%else
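; bsr - 9 is 0 for 10-bit and 2 for 12-bit input; scaled by 4 it selects
; the matching qword shift amount from pq_dir_shr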
push r4
sub bdmaxd, 9
LEA r4, pq_dir_shr
movq m4, [r4 + bdmaxd*4]
pop r4
%endif
DEFINE_ARGS src, ss, var, ss3
lea ss3q, [ssq*3]
mova m0, [srcq + ssq*0]
mova m1, [srcq + ssq*1]
mova m2, [srcq + ssq*2]
mova m3, [srcq + ss3q]
psraw m0, m4
psraw m1, m4
psraw m2, m4
psraw m3, m4
packuswb m0, m1
packuswb m2, m3
mova [rsp + 32 + 0*8], m0
mova [rsp + 32 + 2*8], m2
lea srcq, [srcq + ssq*4]
mova m0, [srcq + ssq*0]
mova m1, [srcq + ssq*1]
mova m2, [srcq + ssq*2]
mova m3, [srcq + ss3q]
psraw m0, m4
psraw m1, m4
psraw m2, m4
psraw m3, m4
packuswb m0, m1
packuswb m2, m3
mova [rsp + 32 + 4*8], m0
mova [rsp + 32 + 6*8], m2
lea srcq, [rsp + 32] ; WIN64 shadow space
mov ssq, 8
%if ARCH_X86_64
call mangle(private_prefix %+ _cdef_dir_8bpc %+ SUFFIX)
%else
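; x86-32: the callee takes its arguments on the stack, so push them (plus
; one dummy push to keep 16-byte alignment) and pop all four afterwards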
movifnidn vard, varm
push eax ; align stack
push vard
push ssd
push srcd
call mangle(private_prefix %+ _cdef_dir_8bpc %+ SUFFIX)
add esp, 0x10
%endif
RET
src/x86/cdef_avx512.asm
@@ -270,8 +270,7 @@ DECLARE_REG_TMP 2, 7
; L8 L9 40 41 42 43 44 45 8e 8f 80 81 82 83 84 85
; La Lb 50 51 52 53 54 55 9e 9f 90 91 92 93 94 95
cglobal cdef_filter_4x8_8bpc, 4, 9, 22, dst, stride, left, top, \
pri, sec, dir, damping, edge
cglobal cdef_filter_4x8_8bpc, 4, 9, 22, dst, stride, left, top, pri, sec, dir, damping, edge
%define base r8-edge_mask
vpbroadcastd ym21, strided
mov r6d, edgem
@@ -40,11 +40,12 @@ decl_cdef_fns(sse4);
decl_cdef_fns(ssse3);
decl_cdef_fns(sse2);
decl_cdef_dir_fn(BF(dav1d_cdef_dir, avx2));
decl_cdef_dir_fn(BF(dav1d_cdef_dir, sse4));
decl_cdef_dir_fn(BF(dav1d_cdef_dir, ssse3));
#endif
decl_cdef_dir_fn(BF(dav1d_cdef_dir, avx2));
decl_cdef_dir_fn(BF(dav1d_cdef_dir, ssse3));
COLD void bitfn(dav1d_cdef_dsp_init_x86)(Dav1dCdefDSPContext *const c) {
const unsigned flags = dav1d_get_cpu_flags();
@@ -58,8 +59,9 @@ COLD void bitfn(dav1d_cdef_dsp_init_x86)(Dav1dCdefDSPContext *const c) {
if (!(flags & DAV1D_X86_CPU_FLAG_SSSE3)) return;
#if BITDEPTH == 8
c->dir = BF(dav1d_cdef_dir, ssse3);
#if BITDEPTH == 8
c->fb[0] = BF(dav1d_cdef_filter_8x8, ssse3);
c->fb[1] = BF(dav1d_cdef_filter_4x8, ssse3);
c->fb[2] = BF(dav1d_cdef_filter_4x4, ssse3);
@@ -77,8 +79,9 @@ COLD void bitfn(dav1d_cdef_dsp_init_x86)(Dav1dCdefDSPContext *const c) {
#if ARCH_X86_64
if (!(flags & DAV1D_X86_CPU_FLAG_AVX2)) return;
#if BITDEPTH == 8
c->dir = BF(dav1d_cdef_dir, avx2);
#if BITDEPTH == 8
c->fb[0] = BF(dav1d_cdef_filter_8x8, avx2);
c->fb[1] = BF(dav1d_cdef_filter_4x8, avx2);
c->fb[2] = BF(dav1d_cdef_filter_4x4, avx2);