Commit 5d888dde authored by Martin Storsjö's avatar Martin Storsjö Committed by Janne Grunau

arm: Consistently use 8/24 columns indentation for assembly

For cases with indented, nested .if/.macro in asm.S, indent those
by 4 chars.

Some initial assembly files were indented to 4/16 columns, while all
the actual implementation files, starting with src/arm/64/mc.S, have
used 8/24 for indentation.
parent 0d936a1a
Pipeline #5989 passed with stages
in 8 minutes and 10 seconds
...@@ -34,32 +34,32 @@ ...@@ -34,32 +34,32 @@
.macro movrel rd, val, offset=0 .macro movrel rd, val, offset=0
#if defined(PIC) && defined(__APPLE__) #if defined(PIC) && defined(__APPLE__)
ldr \rd, 1f ldr \rd, 1f
b 2f b 2f
1: 1:
.word 3f - (2f + 8 - 4 * CONFIG_THUMB) .word 3f - (2f + 8 - 4 * CONFIG_THUMB)
2: 2:
ldr \rd, [pc, \rd] ldr \rd, [pc, \rd]
.if \offset < 0 .if \offset < 0
sub \rd, \rd, #-(\offset) sub \rd, \rd, #-(\offset)
.elseif \offset > 0 .elseif \offset > 0
add \rd, \rd, #\offset add \rd, \rd, #\offset
.endif .endif
.non_lazy_symbol_pointer .non_lazy_symbol_pointer
3: 3:
.indirect_symbol \val .indirect_symbol \val
.word 0 .word 0
.text .text
#elif defined(PIC) #elif defined(PIC)
ldr \rd, 1f ldr \rd, 1f
b 2f b 2f
1: 1:
.word \val + \offset - (2f + 8 - 4 * CONFIG_THUMB) .word \val + \offset - (2f + 8 - 4 * CONFIG_THUMB)
2: 2:
add \rd, \rd, pc add \rd, \rd, pc
#else #else
movw \rd, #:lower16:\val+\offset movw \rd, #:lower16:\val+\offset
movt \rd, #:upper16:\val+\offset movt \rd, #:upper16:\val+\offset
#endif #endif
.endm .endm
......
...@@ -35,57 +35,57 @@ ...@@ -35,57 +35,57 @@
.macro movrel rd, val, offset=0 .macro movrel rd, val, offset=0
#if defined(__APPLE__) #if defined(__APPLE__)
.if \offset < 0 .if \offset < 0
adrp \rd, \val@PAGE adrp \rd, \val@PAGE
add \rd, \rd, \val@PAGEOFF add \rd, \rd, \val@PAGEOFF
sub \rd, \rd, -(\offset) sub \rd, \rd, -(\offset)
.else .else
adrp \rd, \val+(\offset)@PAGE adrp \rd, \val+(\offset)@PAGE
add \rd, \rd, \val+(\offset)@PAGEOFF add \rd, \rd, \val+(\offset)@PAGEOFF
.endif .endif
#elif defined(PIC) && defined(_WIN32) #elif defined(PIC) && defined(_WIN32)
.if \offset < 0 .if \offset < 0
adrp \rd, \val adrp \rd, \val
add \rd, \rd, :lo12:\val add \rd, \rd, :lo12:\val
sub \rd, \rd, -(\offset) sub \rd, \rd, -(\offset)
.else .else
adrp \rd, \val+(\offset) adrp \rd, \val+(\offset)
add \rd, \rd, :lo12:\val+(\offset) add \rd, \rd, :lo12:\val+(\offset)
.endif .endif
#elif defined(PIC) #elif defined(PIC)
adrp \rd, \val+(\offset) adrp \rd, \val+(\offset)
add \rd, \rd, :lo12:\val+(\offset) add \rd, \rd, :lo12:\val+(\offset)
#else #else
ldr \rd, =\val+\offset ldr \rd, =\val+\offset
#endif #endif
.endm .endm
.macro transpose_8x8b r0, r1, r2, r3, r4, r5, r6, r7, r8, r9 .macro transpose_8x8b r0, r1, r2, r3, r4, r5, r6, r7, r8, r9
trn1 \r8\().8b, \r0\().8b, \r1\().8b trn1 \r8\().8b, \r0\().8b, \r1\().8b
trn2 \r9\().8b, \r0\().8b, \r1\().8b trn2 \r9\().8b, \r0\().8b, \r1\().8b
trn1 \r1\().8b, \r2\().8b, \r3\().8b trn1 \r1\().8b, \r2\().8b, \r3\().8b
trn2 \r3\().8b, \r2\().8b, \r3\().8b trn2 \r3\().8b, \r2\().8b, \r3\().8b
trn1 \r0\().8b, \r4\().8b, \r5\().8b trn1 \r0\().8b, \r4\().8b, \r5\().8b
trn2 \r5\().8b, \r4\().8b, \r5\().8b trn2 \r5\().8b, \r4\().8b, \r5\().8b
trn1 \r2\().8b, \r6\().8b, \r7\().8b trn1 \r2\().8b, \r6\().8b, \r7\().8b
trn2 \r7\().8b, \r6\().8b, \r7\().8b trn2 \r7\().8b, \r6\().8b, \r7\().8b
trn1 \r4\().4h, \r0\().4h, \r2\().4h trn1 \r4\().4h, \r0\().4h, \r2\().4h
trn2 \r2\().4h, \r0\().4h, \r2\().4h trn2 \r2\().4h, \r0\().4h, \r2\().4h
trn1 \r6\().4h, \r5\().4h, \r7\().4h trn1 \r6\().4h, \r5\().4h, \r7\().4h
trn2 \r7\().4h, \r5\().4h, \r7\().4h trn2 \r7\().4h, \r5\().4h, \r7\().4h
trn1 \r5\().4h, \r9\().4h, \r3\().4h trn1 \r5\().4h, \r9\().4h, \r3\().4h
trn2 \r9\().4h, \r9\().4h, \r3\().4h trn2 \r9\().4h, \r9\().4h, \r3\().4h
trn1 \r3\().4h, \r8\().4h, \r1\().4h trn1 \r3\().4h, \r8\().4h, \r1\().4h
trn2 \r8\().4h, \r8\().4h, \r1\().4h trn2 \r8\().4h, \r8\().4h, \r1\().4h
trn1 \r0\().2s, \r3\().2s, \r4\().2s trn1 \r0\().2s, \r3\().2s, \r4\().2s
trn2 \r4\().2s, \r3\().2s, \r4\().2s trn2 \r4\().2s, \r3\().2s, \r4\().2s
trn1 \r1\().2s, \r5\().2s, \r6\().2s trn1 \r1\().2s, \r5\().2s, \r6\().2s
trn2 \r5\().2s, \r5\().2s, \r6\().2s trn2 \r5\().2s, \r5\().2s, \r6\().2s
trn2 \r6\().2s, \r8\().2s, \r2\().2s trn2 \r6\().2s, \r8\().2s, \r2\().2s
trn1 \r2\().2s, \r8\().2s, \r2\().2s trn1 \r2\().2s, \r8\().2s, \r2\().2s
trn1 \r3\().2s, \r9\().2s, \r7\().2s trn1 \r3\().2s, \r9\().2s, \r7\().2s
trn2 \r7\().2s, \r9\().2s, \r7\().2s trn2 \r7\().2s, \r9\().2s, \r7\().2s
.endm .endm
#endif /* DAV1D_SRC_ARM_64_UTIL_S */ #endif /* DAV1D_SRC_ARM_64_UTIL_S */
...@@ -31,12 +31,12 @@ ...@@ -31,12 +31,12 @@
#include "config.h" #include "config.h"
#if ARCH_ARM #if ARCH_ARM
.syntax unified .syntax unified
#ifdef __ELF__ #ifdef __ELF__
.arch armv7-a .arch armv7-a
.fpu neon .fpu neon
.eabi_attribute 10, 0 // suppress Tag_FP_arch .eabi_attribute 10, 0 // suppress Tag_FP_arch
.eabi_attribute 12, 0 // suppress Tag_Advanced_SIMD_arch .eabi_attribute 12, 0 // suppress Tag_Advanced_SIMD_arch
#endif #endif
#ifdef _WIN32 #ifdef _WIN32
...@@ -46,7 +46,7 @@ ...@@ -46,7 +46,7 @@
#endif #endif
#if CONFIG_THUMB #if CONFIG_THUMB
.thumb .thumb
#define A @ #define A @
#define T #define T
#else #else
...@@ -86,25 +86,25 @@ ...@@ -86,25 +86,25 @@
#endif #endif
.purgem endfunc .purgem endfunc
.endm .endm
.text .text
.align \align .align \align
.if \export .if \export
.global EXTERN\name .global EXTERN\name
#ifdef __ELF__ #ifdef __ELF__
.type EXTERN\name, %function .type EXTERN\name, %function
#endif #endif
#if HAVE_AS_FUNC #if HAVE_AS_FUNC
.func EXTERN\name .func EXTERN\name
#endif #endif
EXTERN\name: EXTERN\name:
.else .else
#ifdef __ELF__ #ifdef __ELF__
.type \name, %function .type \name, %function
#endif #endif
#if HAVE_AS_FUNC #if HAVE_AS_FUNC
.func \name .func \name
#endif #endif
.endif .endif
\name: \name:
.endm .endm
......
...@@ -32,22 +32,22 @@ ...@@ -32,22 +32,22 @@
#include "src/arm/32/util.S" #include "src/arm/32/util.S"
const register_init, align=3 const register_init, align=3
.quad 0x21f86d66c8ca00ce .quad 0x21f86d66c8ca00ce
.quad 0x75b6ba21077c48ad .quad 0x75b6ba21077c48ad
.quad 0xed56bb2dcb3c7736 .quad 0xed56bb2dcb3c7736
.quad 0x8bda43d3fd1a7e06 .quad 0x8bda43d3fd1a7e06
.quad 0xb64a9c9e5d318408 .quad 0xb64a9c9e5d318408
.quad 0xdf9a54b303f1d3a3 .quad 0xdf9a54b303f1d3a3
.quad 0x4a75479abd64e097 .quad 0x4a75479abd64e097
.quad 0x249214109d5d1c88 .quad 0x249214109d5d1c88
endconst endconst
const error_message_fpscr const error_message_fpscr
.asciz "failed to preserve register FPSCR, changed bits: %x" .asciz "failed to preserve register FPSCR, changed bits: %x"
error_message_gpr: error_message_gpr:
.asciz "failed to preserve register r%d" .asciz "failed to preserve register r%d"
error_message_vfp: error_message_vfp:
.asciz "failed to preserve register d%d" .asciz "failed to preserve register d%d"
endconst endconst
@ max number of args used by any asm function. @ max number of args used by any asm function.
...@@ -61,111 +61,111 @@ endconst ...@@ -61,111 +61,111 @@ endconst
.macro clobbercheck variant .macro clobbercheck variant
.equ pushed, 4*9 .equ pushed, 4*9
function checked_call_\variant, export=1 function checked_call_\variant, export=1
push {r4-r11, lr} push {r4-r11, lr}
.ifc \variant, vfp .ifc \variant, vfp
vpush {d8-d15} vpush {d8-d15}
fmrx r4, FPSCR fmrx r4, FPSCR
push {r4} push {r4}
.equ pushed, pushed + 16*4 + 4 .equ pushed, pushed + 16*4 + 4
.endif .endif
movrel r12, register_init movrel r12, register_init
.ifc \variant, vfp .ifc \variant, vfp
vldm r12, {d8-d15} vldm r12, {d8-d15}
.endif .endif
ldm r12, {r4-r11} ldm r12, {r4-r11}
sub sp, sp, #ARG_STACK_A sub sp, sp, #ARG_STACK_A
.equ pos, 0 .equ pos, 0
.rept MAX_ARGS-4 .rept MAX_ARGS-4
ldr r12, [sp, #ARG_STACK_A + pushed + 8 + pos] ldr r12, [sp, #ARG_STACK_A + pushed + 8 + pos]
str r12, [sp, #pos] str r12, [sp, #pos]
.equ pos, pos + 4 .equ pos, pos + 4
.endr .endr
mov r12, r0 mov r12, r0
mov r0, r2 mov r0, r2
mov r1, r3 mov r1, r3
ldrd r2, r3, [sp, #ARG_STACK_A + pushed] ldrd r2, r3, [sp, #ARG_STACK_A + pushed]
blx r12 blx r12
add sp, sp, #ARG_STACK_A add sp, sp, #ARG_STACK_A
push {r0, r1} push {r0, r1}
movrel r12, register_init movrel r12, register_init
.ifc \variant, vfp .ifc \variant, vfp
.macro check_reg_vfp, dreg, offset .macro check_reg_vfp, dreg, offset
ldrd r2, r3, [r12, #8 * (\offset)] ldrd r2, r3, [r12, #8 * (\offset)]
vmov r0, lr, \dreg vmov r0, lr, \dreg
eor r2, r2, r0 eor r2, r2, r0
eor r3, r3, lr eor r3, r3, lr
orrs r2, r2, r3 orrs r2, r2, r3
bne 4f bne 4f
.endm .endm
.irp n, 8, 9, 10, 11, 12, 13, 14, 15 .irp n, 8, 9, 10, 11, 12, 13, 14, 15
@ keep track of the checked double/SIMD register @ keep track of the checked double/SIMD register
mov r1, #\n mov r1, #\n
check_reg_vfp d\n, \n-8 check_reg_vfp d\n, \n-8
.endr .endr
.purgem check_reg_vfp .purgem check_reg_vfp
fmrx r1, FPSCR fmrx r1, FPSCR
ldr r3, [sp, #8] ldr r3, [sp, #8]
eor r1, r1, r3 eor r1, r1, r3
@ Ignore changes in bits 0-4 and 7 @ Ignore changes in bits 0-4 and 7
bic r1, r1, #0x9f bic r1, r1, #0x9f
@ Ignore changes in the topmost 5 bits @ Ignore changes in the topmost 5 bits
bics r1, r1, #0xf8000000 bics r1, r1, #0xf8000000
bne 3f bne 3f
.endif .endif
@ keep track of the checked GPR @ keep track of the checked GPR
mov r1, #4 mov r1, #4
.macro check_reg reg1, reg2= .macro check_reg reg1, reg2=
ldrd r2, r3, [r12], #8 ldrd r2, r3, [r12], #8
eors r2, r2, \reg1 eors r2, r2, \reg1
bne 2f bne 2f
add r1, r1, #1 add r1, r1, #1
.ifnb \reg2 .ifnb \reg2
eors r3, r3, \reg2 eors r3, r3, \reg2
bne 2f bne 2f
.endif .endif
add r1, r1, #1 add r1, r1, #1
.endm .endm
check_reg r4, r5 check_reg r4, r5
check_reg r6, r7 check_reg r6, r7
@ r9 is a volatile register in the ios ABI @ r9 is a volatile register in the ios ABI
#ifdef __APPLE__ #ifdef __APPLE__
check_reg r8 check_reg r8
#else #else
check_reg r8, r9 check_reg r8, r9
#endif #endif
check_reg r10, r11 check_reg r10, r11
.purgem check_reg .purgem check_reg
b 0f b 0f
4: 4:
movrel r0, error_message_vfp movrel r0, error_message_vfp
b 1f b 1f
3: 3:
movrel r0, error_message_fpscr movrel r0, error_message_fpscr
b 1f b 1f
2: 2:
movrel r0, error_message_gpr movrel r0, error_message_gpr
1: 1:
#ifdef PREFIX #ifdef PREFIX
blx _checkasm_fail_func blx _checkasm_fail_func
#else #else
blx checkasm_fail_func blx checkasm_fail_func
#endif #endif
0: 0:
pop {r0, r1} pop {r0, r1}
.ifc \variant, vfp .ifc \variant, vfp
pop {r2} pop {r2}
fmxr FPSCR, r2 fmxr FPSCR, r2
vpop {d8-d15} vpop {d8-d15}
.endif .endif
pop {r4-r11, pc} pop {r4-r11, pc}
endfunc endfunc
.endm .endm
......
...@@ -32,29 +32,29 @@ ...@@ -32,29 +32,29 @@
#include "src/arm/64/util.S" #include "src/arm/64/util.S"
const register_init, align=4 const register_init, align=4
.quad 0x21f86d66c8ca00ce .quad 0x21f86d66c8ca00ce
.quad 0x75b6ba21077c48ad .quad 0x75b6ba21077c48ad
.quad 0xed56bb2dcb3c7736 .quad 0xed56bb2dcb3c7736
.quad 0x8bda43d3fd1a7e06 .quad 0x8bda43d3fd1a7e06
.quad 0xb64a9c9e5d318408 .quad 0xb64a9c9e5d318408
.quad 0xdf9a54b303f1d3a3 .quad 0xdf9a54b303f1d3a3
.quad 0x4a75479abd64e097 .quad 0x4a75479abd64e097
.quad 0x249214109d5d1c88 .quad 0x249214109d5d1c88
.quad 0x1a1b2550a612b48c .quad 0x1a1b2550a612b48c
.quad 0x79445c159ce79064 .quad 0x79445c159ce79064
.quad 0x2eed899d5a28ddcd .quad 0x2eed899d5a28ddcd
.quad 0x86b2536fcd8cf636 .quad 0x86b2536fcd8cf636
.quad 0xb0856806085e7943 .quad 0xb0856806085e7943
.quad 0x3f2bf84fc0fcca4e .quad 0x3f2bf84fc0fcca4e
.quad 0xacbd382dcf5b8de2 .quad 0xacbd382dcf5b8de2
.quad 0xd229e1f5b281303f .quad 0xd229e1f5b281303f
.quad 0x71aeaff20b095fd9 .quad 0x71aeaff20b095fd9
.quad 0xab63e2e11fa38ed9 .quad 0xab63e2e11fa38ed9
endconst endconst
const error_message const error_message
.asciz "failed to preserve register" .asciz "failed to preserve register"
endconst endconst
...@@ -64,107 +64,107 @@ endconst ...@@ -64,107 +64,107 @@ endconst
#define CLOBBER_STACK ((8*MAX_ARGS + 15) & ~15) #define CLOBBER_STACK ((8*MAX_ARGS + 15) & ~15)
function stack_clobber, export=1 function stack_clobber, export=1
mov x3, sp mov x3, sp
mov x2, #CLOBBER_STACK mov x2, #CLOBBER_STACK
1: 1:
stp x0, x1, [sp, #-16]! stp x0, x1, [sp, #-16]!
subs x2, x2, #16 subs x2, x2, #16
b.gt 1b b.gt 1b
mov sp, x3 mov sp, x3
ret ret
endfunc endfunc
#define ARG_STACK ((8*(MAX_ARGS - 8) + 15) & ~15) #define ARG_STACK ((8*(MAX_ARGS - 8) + 15) & ~15)
function checked_call, export=1 function checked_call, export=1
stp x29, x30, [sp, #-16]! stp x29, x30, [sp, #-16]!
mov x29, sp mov x29, sp
stp x19, x20, [sp, #-16]! stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]! stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]! stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]! stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]! stp x27, x28, [sp, #-16]!
stp d8, d9, [sp, #-16]! stp d8, d9, [sp, #-16]!
stp d10, d11, [sp, #-16]! stp d10, d11, [sp, #-16]!
stp d12, d13, [sp, #-16]! stp d12, d13, [sp, #-16]!
stp d14, d15, [sp, #-16]! stp d14, d15, [sp, #-16]!
movrel x9, register_init movrel x9, register_init
ldp d8, d9, [x9], #16 ldp d8, d9, [x9], #16
ldp d10, d11, [x9], #16 ldp d10, d11, [x9], #16
ldp d12, d13, [x9], #16 ldp d12, d13, [x9], #16
ldp d14, d15, [x9], #16 ldp d14, d15, [x9], #16
ldp x19, x20, [x9], #16 ldp x19, x20, [x9], #16
ldp x21, x22, [x9], #16 ldp x21, x22, [x9], #16
ldp x23, x24, [x9], #16 ldp x23, x24, [x9], #16
ldp x25, x26, [x9], #16 ldp x25, x26, [x9], #16
ldp x27, x28, [x9], #16 ldp x27, x28, [x9], #16
sub sp, sp, #ARG_STACK sub sp, sp, #ARG_STACK
.equ pos, 0 .equ pos, 0
.rept MAX_ARGS-8 .rept MAX_ARGS-8
// Skip the first 8 args, that are loaded into registers // Skip the first 8 args, that are loaded into registers
ldr x9, [x29, #16 + 8*8 + pos] ldr x9, [x29, #16 + 8*8 + pos]
str x9, [sp, #pos] str x9, [sp, #pos]
.equ pos, pos + 8 .equ pos, pos + 8
.endr .endr
mov x12, x0 mov x12, x0
ldp x0, x1, [x29, #16] ldp x0, x1, [x29, #16]
ldp x2, x3, [x29, #32] ldp x2, x3, [x29, #32]
ldp x4, x5, [x29, #48] ldp x4, x5, [x29, #48]
ldp x6, x7, [x29, #64] ldp x6, x7, [x29, #64]
blr x12 blr x12
add sp, sp, #ARG_STACK add sp, sp, #ARG_STACK
stp x0, x1, [sp, #-16]! stp x0, x1, [sp, #-16]!
movrel x9, register_init movrel x9, register_init
movi v3.8h, #0 movi v3.8h, #0
.macro check_reg_neon reg1, reg2 .macro check_reg_neon reg1, reg2
ldr q0, [x9], #16 ldr q0, [x9], #16
uzp1 v1.2d, v\reg1\().2d, v\reg2\().2d uzp1 v1.2d, v\reg1\().2d, v\reg2\().2d
eor v0.16b, v0.16b, v1.16b eor v0.16b, v0.16b, v1.16b
orr v3.16b, v3.16b, v0.16b orr v3.16b, v3.16b, v0.16b
.endm .endm
check_reg_neon 8, 9 check_reg_neon 8, 9
check_reg_neon 10, 11 check_reg_neon 10, 11
check_reg_neon 12, 13 check_reg_neon 12, 13
check_reg_neon 14, 15 check_reg_neon 14, 15
uqxtn v3.8b, v3.8h uqxtn v3.8b, v3.8h
umov x3, v3.d[0] umov x3, v3.d[0]
.macro check_reg reg1, reg2 .macro check_reg reg1, reg2
ldp x0, x1, [x9], #16 ldp x0, x1, [x9], #16
eor x0, x0, \reg1 eor x0, x0, \reg1
eor x1, x1, \reg2 eor x1, x1, \reg2
orr x3, x3, x0 orr x3, x3, x0
orr x3, x3, x1 orr x3, x3, x1
.endm .endm
check_reg x19, x20 check_reg x19, x20
check_reg x21, x22 check_reg x21, x22
check_reg x23, x24 check_reg x23, x24
check_reg x25, x26 check_reg x25, x26
check_reg x27, x28 check_reg x27, x28
cbz x3, 0f cbz x3, 0f
movrel x0, error_message movrel x0, error_message
#ifdef PREFIX #ifdef PREFIX
bl _checkasm_fail_func bl _checkasm_fail_func
#else #else
bl checkasm_fail_func bl checkasm_fail_func
#endif #endif
0: 0:
ldp x0, x1, [sp], #16 ldp x0, x1, [sp], #16
ldp d14, d15, [sp], #16 ldp d14, d15, [sp], #16
ldp d12, d13, [sp], #16 ldp d12, d13, [sp], #16
ldp d10, d11, [sp], #16 ldp d10, d11, [sp], #16
ldp d8, d9, [sp], #16 ldp d8, d9, [sp], #16
ldp x27, x28, [sp], #16 ldp x27, x28, [sp], #16
ldp x25, x26, [sp], #16 ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16 ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16 ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16 ldp x19, x20, [sp], #16
ldp x29, x30, [sp], #16 ldp x29, x30, [sp], #16
ret ret
endfunc endfunc
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment