shithub: openh264

ref: faaf62afadeedc01a89a482ab56ec23027b6c3ba
parent: ac03b8b503a9bc1b4f597551ade77e4cf43c887d
author: Martin Storsjö <martin@martin.st>
date: Sat May 31 09:29:36 EDT 2014

Get rid of double spaces in macro declarations
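NASM ignores the amount of whitespace between %macro and the macro name,
so every hunk below is purely cosmetic: both spellings declare the same
macro. A minimal sketch of the pattern, using the hypothetical macro name
COPY_WORD_4 (modeled on the MMX_Copy4Times macro touched below):

%macro COPY_WORD_4 2        ; "%macro  COPY_WORD_4 2" would be identical
	movd      %1, %2    ; low 16 bits of %2 land in the low word of %1
	punpcklwd %1, %1    ; duplicate the low word:  w0 w0 w1 w1
	punpckldq %1, %1    ; duplicate the low dword: w0 w0 w0 w0
%endmacro

Invoked as, say, "COPY_WORD_4 mm0, eax", this broadcasts the low word of
eax into all four word lanes of mm0.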

--- a/codec/common/x86/asm_inc.asm
+++ b/codec/common/x86/asm_inc.asm
@@ -542,7 +542,7 @@
 %endmacro
 
 ;copy a dw into a xmm for 8 times
-%macro  SSE2_Copy8Times 2
+%macro SSE2_Copy8Times 2
 		movd	%1, %2
 		punpcklwd %1, %1
 		pshufd	%1,	%1,	0
@@ -549,7 +549,7 @@
 %endmacro
 
 ;copy a db into a xmm for 16 times
-%macro  SSE2_Copy16Times 2
+%macro SSE2_Copy16Times 2
 		movd		%1, %2
 		pshuflw		%1, %1, 0
 		punpcklqdq	%1, %1
--- a/codec/common/x86/satd_sad.asm
+++ b/codec/common/x86/satd_sad.asm
@@ -82,7 +82,7 @@
       psubw %1, %2
 %endmacro
 
-%macro  SSE2_SumWHorizon1 2
+%macro SSE2_SumWHorizon1 2
 	movdqa      %2, %1
 	psrldq      %2, 8
 	paddusw     %1, %2
@@ -112,7 +112,7 @@
 	paddusw       %7, %4
 %endmacro
 
-%macro  SSE2_SumWHorizon 3
+%macro SSE2_SumWHorizon 3
 	movhlps		%2, %1			; x2 = xx xx xx xx d7 d6 d5 d4
 	paddw		%1, %2			; x1 = xx xx xx xx d37 d26 d15 d04
 	punpcklwd	%1, %3			; x1 =  d37  d26 d15 d04
--- a/codec/decoder/core/x86/intra_pred.asm
+++ b/codec/decoder/core/x86/intra_pred.asm
@@ -119,7 +119,7 @@
 		punpckhdq %1,	%4
 %endmacro
 
-%macro  SUMW_HORIZON 3
+%macro SUMW_HORIZON 3
 	movhlps		%2, %1			; x2 = xx xx xx xx d7 d6 d5 d4
 	paddw		%1, %2			; x1 = xx xx xx xx d37 d26 d15 d04
 	punpcklwd	%1, %3			; x1 =  d37  d26 d15 d04
@@ -129,7 +129,7 @@
 	paddd		%1, %2			; x1 = xxxx xxxx xxxx  d01234567
 %endmacro
 
-%macro  COPY_16_TIMES 2
+%macro COPY_16_TIMES 2
 		movdqa		%2,	[%1-16]
 		psrldq		%2,	15
 		pmuludq		%2,	[mmx_01bytes]
@@ -136,7 +136,7 @@
 		pshufd		%2,	%2, 0
 %endmacro
 
-%macro  COPY_16_TIMESS 3
+%macro COPY_16_TIMESS 3
 		movdqa		%2,	[%1+%3-16]
 		psrldq		%2,	15
 		pmuludq		%2,	[mmx_01bytes]
--- a/codec/encoder/core/x86/intra_pred.asm
+++ b/codec/encoder/core/x86/intra_pred.asm
@@ -96,7 +96,7 @@
 	punpckldq	%1,	%2
 %endmacro
 
-%macro  SUMW_HORIZON1 2
+%macro SUMW_HORIZON1 2
 	movdqa      %2, %1
 	psrldq      %2, 8
 	paddusw     %1, %2
@@ -130,7 +130,7 @@
 		punpckhdq %1,	%4
 %endmacro
 
-%macro  SUMW_HORIZON 3
+%macro SUMW_HORIZON 3
 	movhlps		%2, %1			; x2 = xx xx xx xx d7 d6 d5 d4
 	paddw		%1, %2			; x1 = xx xx xx xx d37 d26 d15 d04
 	punpcklwd	%1, %3			; x1 =  d37  d26 d15 d04
@@ -141,7 +141,7 @@
 %endmacro
 
 
-%macro  COPY_16_TIMES 2
+%macro COPY_16_TIMES 2
 		movdqa		%2,	[%1-16]
 		psrldq		%2,	15
 		pmuludq		%2,	[mmx_01bytes]
@@ -148,7 +148,7 @@
 		pshufd		%2,	%2, 0
 %endmacro
 
-%macro  COPY_16_TIMESS 3
+%macro COPY_16_TIMESS 3
 		movdqa		%2,	[%1+%3-16]
 		psrldq		%2,	15
 		pmuludq		%2,	[mmx_01bytes]
--- a/codec/encoder/core/x86/quant.asm
+++ b/codec/encoder/core/x86/quant.asm
@@ -166,7 +166,7 @@
 		LOAD_4_PARA_POP
 		ret
 
-%macro  MMX_Copy4Times 2
+%macro MMX_Copy4Times 2
 		movd		%1, %2
 		punpcklwd	%1, %1
 		punpckldq	%1,	%1
--- a/codec/encoder/core/x86/sample_sc.asm
+++ b/codec/encoder/core/x86/sample_sc.asm
@@ -44,7 +44,7 @@
 ;**********************************************************************************************************************************
 ; try 8 mv via offset
 ; xmm7 store sad costs
-%macro   SAD_16x16_LINE_SSE41  4	; src, ref, stride_src, stride_ref
+%macro SAD_16x16_LINE_SSE41  4	; src, ref, stride_src, stride_ref
     movdqa		xmm0, [%1]
     movdqu		xmm1, [%2]
     movdqu		xmm2, [%2+8h]
@@ -66,7 +66,7 @@
     add			%1, %3
     add			%2, %4
 %endmacro	; end of SAD_16x16_LINE_SSE41
-%macro   SAD_16x16_LINE_SSE41E  4	; src, ref, stride_src, stride_ref
+%macro SAD_16x16_LINE_SSE41E  4	; src, ref, stride_src, stride_ref
     movdqa		xmm0, [%1]
     movdqu		xmm1, [%2]
     movdqu		xmm2, [%2+8h]
@@ -168,7 +168,7 @@
 ;**********************************************************************************************************************************
 ; try 8 mv via offset
 ; xmm7 store sad costs
-%macro   SAD_8x8_LINE_SSE41  4	; src, ref, stride_src, stride_ref
+%macro SAD_8x8_LINE_SSE41  4	; src, ref, stride_src, stride_ref
     movdqu		xmm0, [%1]
     movdqu		xmm1, [%2]
     movdqa		xmm2, xmm1
@@ -182,7 +182,7 @@
     add			%1, %3
     add			%2, %4
 %endmacro	; end of SAD_8x8_LINE_SSE41
-%macro   SAD_8x8_LINE_SSE41E  4	; src, ref, stride_src, stride_ref
+%macro SAD_8x8_LINE_SSE41E  4	; src, ref, stride_src, stride_ref
     movdqu		xmm0, [%1]
     movdqu		xmm1, [%2]
     movdqa		xmm2, xmm1
--- a/codec/processing/src/arm/vaa_calc_neon.S
+++ b/codec/processing/src/arm/vaa_calc_neon.S
@@ -161,7 +161,7 @@
 
 
 #ifdef __APPLE__
-.macro  SAD_SD_MAD_16BYTES
+.macro SAD_SD_MAD_16BYTES
 	vld1.32 {q0}, [$0], $2
 	vld1.32 {q1}, [$1], $2
 
@@ -173,7 +173,7 @@
 	vpadal.u8 $6, q0
 .endm
 
-.macro  SAD_SD_MAD_8x16BYTES
+.macro SAD_SD_MAD_8x16BYTES
 	vld1.32 {q0}, [$0], $2
 	vld1.32 {q1}, [$1], $2
 
@@ -195,7 +195,7 @@
 	vsub.u16 $5, q2, q3
 .endm
 
-.macro  SAD_SD_MAD_CALC
+.macro SAD_SD_MAD_CALC
 	vpmax.u8 d0, $0, $1 //8bytes
 	vpmax.u8 d0, d0, d0 //4bytes
 	vpmax.u8 $2, d0, d0 //2bytes
@@ -206,7 +206,7 @@
 	vpaddl.s32 $4, $4
 .endm
 #else
-.macro  SAD_SD_MAD_16BYTES arg0, arg1, arg2, arg3, arg4, arg5, arg6
+.macro SAD_SD_MAD_16BYTES arg0, arg1, arg2, arg3, arg4, arg5, arg6
 	vld1.32 {q0}, [\arg0], \arg2
 	vld1.32 {q1}, [\arg1], \arg2
 
@@ -218,7 +218,7 @@
 	vpadal.u8 \arg6, q0
 .endm
 
-.macro  SAD_SD_MAD_8x16BYTES arg0, arg1, arg2, arg3, arg4, arg5
+.macro SAD_SD_MAD_8x16BYTES arg0, arg1, arg2, arg3, arg4, arg5
 	vld1.32 {q0}, [\arg0], \arg2
 	vld1.32 {q1}, [\arg1], \arg2
 
@@ -240,7 +240,7 @@
 	vsub.u16 \arg5, q2, q3
 .endm
 
-.macro  SAD_SD_MAD_CALC arg0, arg1, arg2, arg3, arg4
+.macro SAD_SD_MAD_CALC arg0, arg1, arg2, arg3, arg4
 	vpmax.u8 d0, \arg0, \arg1 //8bytes
 	vpmax.u8 d0, d0, d0 //4bytes
 	vpmax.u8 \arg2, d0, d0 //2bytes
@@ -315,7 +315,7 @@
 
 
 #ifdef __APPLE__
-.macro  SSD_MUL_SUM_16BYTES_RESET
+.macro SSD_MUL_SUM_16BYTES_RESET
 	vmull.u8 $3, $0, $0
 	vpaddl.u16 $2, $3
 
@@ -323,7 +323,7 @@
 	vpadal.u16 $2, $3
 .endm
 
-.macro  SSD_MUL_SUM_16BYTES
+.macro SSD_MUL_SUM_16BYTES
 	vmull.u8 $3, $0, $0
 	vpadal.u16 $2, $3
 
@@ -467,13 +467,13 @@
 	SAD_SSD_BGD_CALC_8x16 d27, q15, q7
 .endm
 
-.macro  SSD_SAD_SD_MAD_PADDL
+.macro SSD_SAD_SD_MAD_PADDL
 	vpaddl.s16 $0, $0
 	vpaddl.s32 $0, $0
 	vadd.i32 $1, $1, $2
 .endm
 #else
-.macro  SSD_MUL_SUM_16BYTES_RESET arg0, arg1, arg2, arg3
+.macro SSD_MUL_SUM_16BYTES_RESET arg0, arg1, arg2, arg3
 	vmull.u8   \arg3, \arg0, \arg0
 	vpaddl.u16 \arg2, \arg3
 
@@ -481,7 +481,7 @@
 	vpadal.u16 \arg2, \arg3
 .endm
 
-.macro  SSD_MUL_SUM_16BYTES arg0, arg1, arg2, arg3
+.macro SSD_MUL_SUM_16BYTES arg0, arg1, arg2, arg3
 	vmull.u8   \arg3, \arg0, \arg0
 	vpadal.u16 \arg2, \arg3
 
@@ -625,7 +625,7 @@
 	SAD_SSD_BGD_CALC_8x16 d27, q15, q7
 .endm
 
-.macro  SSD_SAD_SD_MAD_PADDL arg0, arg1, arg2
+.macro SSD_SAD_SD_MAD_PADDL arg0, arg1, arg2
 	vpaddl.s16 \arg0, \arg0
 	vpaddl.s32 \arg0, \arg0
 	vadd.i32 \arg1, \arg1, \arg2
--- a/codec/processing/src/x86/vaa.asm
+++ b/codec/processing/src/x86/vaa.asm
@@ -85,7 +85,7 @@
   ; end of @sum_8x2
 %endmacro       ; END of SUM_WORD_8x2_SSE2
 
-%macro  WELS_SAD_SUM_SQSUM_16x1_SSE2 3 ;esi:%1,edi:%2,ebx:%3
+%macro WELS_SAD_SUM_SQSUM_16x1_SSE2 3 ;esi:%1,edi:%2,ebx:%3
   movdqa        xmm1,   [%1]
   movdqa        xmm2,   [%2]
   movdqa        xmm3,   xmm1
@@ -108,7 +108,7 @@
   add           %2,     %3
 %endmacro
 
-%macro  WELS_SAD_SUM_SQSUM_SQDIFF_16x1_SSE2 3 ;esi:%1 edi:%2 ebx:%3
+%macro WELS_SAD_SUM_SQSUM_SQDIFF_16x1_SSE2 3 ;esi:%1 edi:%2 ebx:%3
   movdqa        xmm1,   [%1]
   movdqa        xmm2,   [%2]
   movdqa        xmm3,   xmm1
@@ -144,7 +144,7 @@
   add           %2,     %3
 %endmacro
 
-%macro  WELS_SAD_SD_MAD_16x1_SSE2       7 ;esi:%5 edi:%6 ebx:%7
+%macro WELS_SAD_SD_MAD_16x1_SSE2       7 ;esi:%5 edi:%6 ebx:%7
 %define sad_reg                 %1
 %define sum_cur_reg             %2
 %define sum_ref_reg             %3
@@ -172,7 +172,7 @@
 %endmacro
 
 
-%macro  WELS_MAX_REG_SSE2       1       ; xmm1, xmm2, xmm3 can be used
+%macro WELS_MAX_REG_SSE2       1       ; xmm1, xmm2, xmm3 can be used
 %define max_reg  %1
   movdqa        xmm1,           max_reg
   psrldq        xmm1,           4
@@ -185,7 +185,7 @@
   pmaxub        max_reg,        xmm1
 %endmacro
 
-%macro  WELS_SAD_BGD_SQDIFF_16x1_SSE2   7 ;esi:%5 edi:%6 ebx:%7
+%macro WELS_SAD_BGD_SQDIFF_16x1_SSE2   7 ;esi:%5 edi:%6 ebx:%7
 %define sad_reg         %1
 %define sum_reg         %2
 %define mad_reg         %3
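
A note on the ARM file above: Apple's legacy assembler only accepts
positional .macro parameters ($0, $1, ...), while GNU as uses named
parameters referenced as \name, which is why vaa_calc_neon.S declares
each macro twice behind #ifdef __APPLE__. A minimal sketch of the two
spellings, using the hypothetical macro name PAIRWISE_ADD:

#ifdef __APPLE__
.macro PAIRWISE_ADD             // Apple as: positional parameters
	vpaddl.u8 $0, $1        // pairwise-add u8 pairs of $1 into u16 lanes of $0
.endm
#else
.macro PAIRWISE_ADD arg0, arg1  // GNU as: named parameters
	vpaddl.u8 \arg0, \arg1
.endm
#endif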