; * along with this program; if not, write to the Free Software
; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id: sad_sse2.asm,v 1.20 2009-09-16 17:07:58 Isibaar Exp $
; *
; ***************************************************************************/
; nasm.inc supplies the portability layer used throughout this file:
; the cglobal/ENDFUNC function macros, the prm1..prmN argument accessors,
; the _EAX/TMP0/TMP1 register aliases (mapped per 32/64-bit ABI), and the
; SECTION_ALIGN / TEXT / NON_EXEC_STACK helpers.  The old local cglobal
; macro and "BITS 32" directive are superseded by this include.
%include "nasm.inc"
;=============================================================================
; Read only data
;=============================================================================

%ifdef FORMAT_COFF
SECTION .rodata
%else
SECTION .rodata align=16
%endif

ALIGN SECTION_ALIGN
zero times 4 dd 0

;=============================================================================
; Code
;=============================================================================

TEXT
cglobal sad16_sse2
cglobal dev16_sse2
cglobal sad16_sse3
cglobal dev16_sse3
;-----------------------------------------------------------------------------
; uint32_t sad16_sse2 (const uint8_t * const cur, <- assumed aligned!
;                      const uint8_t * const ref,
;                      const uint32_t stride);
;-----------------------------------------------------------------------------

; Accumulate the SADs of two rows (16 bytes each) into xmm4.
; %1   = unaligned 16-byte load instruction for ref (movdqu or lddqu)
; In:  _EAX = cur (16-byte aligned), TMP1 = ref, TMP0 = stride
; Out: xmm4 += per-row psadbw sums; _EAX and TMP1 advance by 2*stride
%macro SAD_16x16_SSE2 1
  %1      xmm0, [TMP1]
  %1      xmm1, [TMP1+TMP0]
  lea     TMP1, [TMP1+2*TMP0]
  movdqa  xmm2, [_EAX]
  movdqa  xmm3, [_EAX+TMP0]
  lea     _EAX, [_EAX+2*TMP0]
  psadbw  xmm0, xmm2
  paddusw xmm4, xmm0
  psadbw  xmm1, xmm3
  paddusw xmm4, xmm1
%endmacro
; Full 16x16 SAD body, shared by sad16_sse2 (%1 = movdqu) and
; sad16_sse3 (%1 = lddqu).  Returns the SAD in eax.
%macro SAD16_SSE2_SSE3 1
  mov _EAX, prm1                ; cur (assumed aligned)
  mov TMP1, prm2                ; ref
  mov TMP0, prm3                ; stride

  pxor xmm4, xmm4               ; accum

  ; 8 invocations x 2 rows each = 16 rows
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1

  ; psadbw left one partial sum in each 64-bit half of xmm4;
  ; bring the high half down, add, and extract the total.
  pshufd  xmm5, xmm4, 00000010b
  paddusw xmm4, xmm5
  pextrw  eax, xmm4, 0

  ret
%endmacro
; sad16_sse2: SSE2 variant, unaligned ref loads via movdqu.
ALIGN SECTION_ALIGN
sad16_sse2:
  SAD16_SSE2_SSE3 movdqu
ENDFUNC


; sad16_sse3: SSE3 variant, unaligned ref loads via lddqu (faster on
; CPUs where lddqu avoids cache-line-split penalties).
ALIGN SECTION_ALIGN
sad16_sse3:
  SAD16_SSE2_SSE3 lddqu
ENDFUNC
;-----------------------------------------------------------------------------
; uint32_t dev16_sse2(const uint8_t * const cur, const uint32_t stride);
;-----------------------------------------------------------------------------

; Accumulate two rows of psadbw against xmm5 into xmm4.
; %1 = unaligned 16-byte load instruction (movdqu or lddqu)
; _EAX: src, TMP0: stride, xmm5: zero or replicated mean => xmm4: result
%macro MEAN_16x16_SSE2 1
  %1      xmm0, [_EAX]
  %1      xmm1, [_EAX+TMP0]
  lea     _EAX, [_EAX+2*TMP0]   ; + 2*stride
  psadbw  xmm0, xmm5
  paddusw xmm4, xmm0
  psadbw  xmm1, xmm5
  paddusw xmm4, xmm1
%endmacro
; Full 16x16 mean-deviation body, shared by dev16_sse2 (%1 = movdqu)
; and dev16_sse3 (%1 = lddqu).  Two passes over the block:
;   pass 1: sum all 256 pixels (psadbw vs zero) -> mean = sum >> 8
;   pass 2: sum |pixel - mean| with the mean broadcast in xmm5
; Returns the deviation in eax.
%macro MEAN16_SSE2_SSE3 1
  mov _EAX, prm1                ; src
  mov TMP0, prm2                ; stride

  pxor xmm4, xmm4               ; accum
  pxor xmm5, xmm5               ; zero

  ; pass 1: 8 x 2 rows = 16 rows summed against zero
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  mov _EAX, prm1                ; src again

  ; fold the two 64-bit partial sums, derive the mean (sum/256) and
  ; broadcast it to all 16 bytes of xmm5
  pshufd   xmm5, xmm4, 10b
  paddusw  xmm5, xmm4
  pxor     xmm4, xmm4           ; zero accum
  psrlw    xmm5, 8              ; => Mean
  pshuflw  xmm5, xmm5, 0        ; replicate Mean
  packuswb xmm5, xmm5
  pshufd   xmm5, xmm5, 00000000b

  ; pass 2: sum of absolute differences against the mean
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  pshufd  xmm5, xmm4, 10b
  paddusw xmm5, xmm4
  pextrw  eax, xmm5, 0

  ret
%endmacro
164 |
|
|
165 |
|
ALIGN SECTION_ALIGN |
166 |
dev16_sse2: |
dev16_sse2: |
167 |
mov eax, [esp+ 4] ; src |
MEAN16_SSE2_SSE3 movdqu |
168 |
mov ecx, [esp+ 8] ; stride |
ENDFUNC |
169 |
|
|
170 |
pxor xmm6, xmm6 ; accum |
ALIGN SECTION_ALIGN |
171 |
pxor xmm7, xmm7 ; zero |
dev16_sse3: |
172 |
|
MEAN16_SSE2_SSE3 lddqu |
173 |
|
ENDFUNC |
174 |
|
|
175 |
MEAN_16x16_SSE2 |
NON_EXEC_STACK |
|
MEAN_16x16_SSE2 |
|
|
MEAN_16x16_SSE2 |
|
|
MEAN_16x16_SSE2 |
|
|
|
|
|
MEAN_16x16_SSE2 |
|
|
MEAN_16x16_SSE2 |
|
|
MEAN_16x16_SSE2 |
|
|
MEAN_16x16_SSE2 |
|
|
|
|
|
mov eax, [esp+ 4] ; src again |
|
|
|
|
|
pshufd xmm7, xmm6, 10b |
|
|
paddusw xmm7, xmm6 |
|
|
pxor xmm6, xmm6 ; zero accum |
|
|
psrlw xmm7, 8 ; => Mean |
|
|
pshuflw xmm7, xmm7, 0 ; replicate Mean |
|
|
packuswb xmm7, xmm7 |
|
|
pshufd xmm7, xmm7, 00000000b |
|
|
|
|
|
MEAN_16x16_SSE2 |
|
|
MEAN_16x16_SSE2 |
|
|
MEAN_16x16_SSE2 |
|
|
MEAN_16x16_SSE2 |
|
|
|
|
|
MEAN_16x16_SSE2 |
|
|
MEAN_16x16_SSE2 |
|
|
MEAN_16x16_SSE2 |
|
|
MEAN_16x16_SSE2 |
|
|
|
|
|
pshufd xmm7, xmm6, 10b |
|
|
paddusw xmm7, xmm6 |
|
|
pextrw eax, xmm7, 0 |
|
|
ret |
|