;/****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - SSE2 optimized SAD operators -
; *
; *  Copyright(C) 2002 Dmitry Rozhdestvensky
; *               2003 Pascal Massimino <skal@planet-d.net>
; *
; *  This program is free software; you can redistribute it and/or modify it
; *  under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id: sad_sse2.asm,v 1.15 2008-11-11 20:46:24 Isibaar Exp $
; *
; ***************************************************************************/

BITS 32

%macro cglobal 1
  %ifdef PREFIX
    %ifdef MARK_FUNCS
      global _%1:function %1.endfunc-%1
      %define %1 _%1:function %1.endfunc-%1
      %define ENDFUNC .endfunc
    %else
      global _%1
      %define %1 _%1
      %define ENDFUNC
    %endif
  %else
    %ifdef MARK_FUNCS
      global %1:function %1.endfunc-%1
      %define ENDFUNC .endfunc
    %else
      global %1
      %define ENDFUNC
    %endif
  %endif
%endmacro

;=============================================================================
; Read only data
;=============================================================================

%ifdef FORMAT_COFF
SECTION .rodata
%else
SECTION .rodata align=16
%endif

ALIGN 64
zero    times 4   dd 0

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal  sad16_sse2
cglobal  dev16_sse2

cglobal  sad16_sse3
cglobal  dev16_sse3

;-----------------------------------------------------------------------------
; uint32_t sad16_sse2 (const uint8_t * const cur, <- assumed aligned!
;                      const uint8_t * const ref,
;                      const uint32_t stride,
;                      const uint32_t /*ignored*/);
;-----------------------------------------------------------------------------
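
; A rough scalar equivalent of what sad16 computes (an illustrative
; comment-only sketch, not part of the build):
;
;     uint32_t sad = 0;
;     for (j = 0; j < 16; j++)
;       for (i = 0; i < 16; i++)
;         sad += abs(cur[j*stride + i] - ref[j*stride + i]);
;     return sad;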

%macro SAD_16x16_SSE2 1 ; %1: load op for ref (movdqu or lddqu); eax: cur, edx: ref, ecx: stride
  %1     xmm0, [edx]
  %1     xmm1, [edx+ecx]
  lea    edx, [edx+2*ecx]
  movdqa xmm2, [eax]
  movdqa xmm3, [eax+ecx]
  lea    eax, [eax+2*ecx]
  psadbw xmm0, xmm2        ; SAD of first row
  paddusw xmm6, xmm0
  psadbw xmm1, xmm3        ; SAD of second row
  paddusw xmm6, xmm1
%endmacro
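
; psadbw leaves two 16-bit partial sums in each destination register: one
; for the low eight bytes (word 0) and one for the high eight bytes
; (word 4). xmm6 therefore carries two running totals, which the
; pshufd/paddusw pair at the end of SAD16_SSE2_SSE3 below folds into one.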

%macro SAD16_SSE2_SSE3 1
  mov eax, [esp+ 4]  ; cur (assumed aligned)
  mov edx, [esp+ 8]  ; ref
  mov ecx, [esp+12]  ; stride

  pxor xmm6, xmm6    ; accum

  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1

  pshufd  xmm5, xmm6, 00000010b ; fold the high-quadword partial sum
  paddusw xmm6, xmm5
  pextrw  eax, xmm6, 0          ; final 16x16 SAD
  ret
%endmacro

ALIGN 16
sad16_sse2:
  SAD16_SSE2_SSE3 movdqu
ENDFUNC

ALIGN 16
sad16_sse3:
  SAD16_SSE2_SSE3 lddqu
ENDFUNC
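
; Minimal usage sketch (illustrative only; assumes a cdecl caller, a build
; without PREFIX, and that cur_ptr holds a 16-byte aligned pointer):
;
;     push dword 0           ; fourth argument is ignored
;     push dword [stride]
;     push dword [ref_ptr]
;     push dword [cur_ptr]
;     call sad16_sse2
;     add esp, 16            ; caller cleans the stack
;     ; eax now holds the 16x16 SAD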

;-----------------------------------------------------------------------------
; uint32_t dev16_sse2(const uint8_t * const cur, const uint32_t stride);
;-----------------------------------------------------------------------------
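
; dev16 returns the sum of absolute deviations from the block mean; a rough
; scalar equivalent (an illustrative comment-only sketch, not part of the
; build):
;
;     mean = (sum of all 256 pixels) / 256;
;     uint32_t dev = 0;
;     for (j = 0; j < 16; j++)
;       for (i = 0; i < 16; i++)
;         dev += abs(cur[j*stride + i] - mean);
;     return dev;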

%macro MEAN_16x16_SSE2 1 ; eax: src, ecx: stride, xmm7: zero or mean => xmm6: result
  %1  xmm0, [eax]
  %1  xmm1, [eax+ecx]
  lea eax, [eax+2*ecx]    ; + 2*stride
  psadbw  xmm0, xmm7
  paddusw xmm6, xmm0
  psadbw  xmm1, xmm7
  paddusw xmm6, xmm1
%endmacro
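
; Each invocation handles two rows, so eight invocations cover a 16x16
; block. MEAN16_SSE2_SSE3 below runs that eight-call sequence twice: once
; with xmm7 = 0 (plain pixel sum) and once with xmm7 holding the broadcast
; mean (sum of absolute deviations).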

%macro MEAN16_SSE2_SSE3 1
  mov eax, [esp+ 4]   ; src
  mov ecx, [esp+ 8]   ; stride

  pxor xmm6, xmm6     ; accum
  pxor xmm7, xmm7     ; zero

  ; first pass: xmm7 is zero, so xmm6 accumulates the plain pixel sum
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  mov eax, [esp+ 4]       ; src again

  pshufd  xmm7, xmm6, 10b
  paddusw xmm7, xmm6
  pxor    xmm6, xmm6      ; zero accum
  psrlw   xmm7, 8         ; sum/256 => Mean
  pshuflw xmm7, xmm7, 0   ; replicate Mean
  packuswb xmm7, xmm7
  pshufd  xmm7, xmm7, 00000000b

  ; second pass: xmm7 holds the broadcast mean, so xmm6 accumulates the deviation
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  pshufd  xmm7, xmm6, 10b
  paddusw xmm7, xmm6
  pextrw  eax, xmm7, 0
  ret
%endmacro
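
; Note on the mean: a 16x16 block holds exactly 256 pixels, so the
; "psrlw xmm7, 8" above divides the pixel sum by the pixel count, giving
; the (truncated) mean directly. pshuflw replicates that mean word,
; packuswb narrows it to a byte, and the final pshufd broadcasts it across
; all 16 byte lanes for the second pass.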

ALIGN 16
dev16_sse2:
  MEAN16_SSE2_SSE3 movdqu
ENDFUNC

ALIGN 16
dev16_sse3:
  MEAN16_SSE2_SSE3 lddqu
ENDFUNC
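
; Design note: the _sse3 variants differ from the _sse2 ones only in using
; lddqu instead of movdqu for unaligned loads; on the processors lddqu was
; introduced for (Prescott-class Pentium 4), it is faster when a load
; crosses a cache-line boundary and behaves identically otherwise.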

%ifidn __OUTPUT_FORMAT__,elf
section ".note.GNU-stack" noalloc noexec nowrite progbits
%endif