; * along with this program; if not, write to the Free Software
; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id: sad_sse2.asm,v 1.13 2006-12-06 19:55:07 Isibaar Exp $
; *
; ***************************************************************************/

cglobal sad16_sse2
cglobal dev16_sse2

cglobal sad16_sse3
cglobal dev16_sse3

;-----------------------------------------------------------------------------
; uint32_t sad16_sse2 (const uint8_t * const cur, <- assumed aligned!
;                      const uint8_t * const ref,
;                      const uint32_t stride);
;-----------------------------------------------------------------------------
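
; For reference, a plain-C sketch of what this routine computes (sad16_c is a
; hypothetical name, not part of this file): the sum of absolute differences
; over a 16x16 block.
;
;   #include <stdlib.h>  /* abs */
;   uint32_t sad16_c(const uint8_t *cur, const uint8_t *ref, uint32_t stride)
;   {
;     uint32_t sad = 0;
;     for (int j = 0; j < 16; j++, cur += stride, ref += stride)
;       for (int i = 0; i < 16; i++)
;         sad += abs(cur[i] - ref[i]);
;     return sad;
;   }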

%macro SAD_16x16_SSE2 1       ; %1 = movdqu (SSE2) or lddqu (SSE3)
  %1      xmm0, [edx]         ; two rows of ref (unaligned)
  %1      xmm1, [edx+ecx]
  lea     edx, [edx+2*ecx]
  movdqa  xmm2, [eax]         ; two rows of cur (aligned)
  movdqa  xmm3, [eax+ecx]
  lea     eax, [eax+2*ecx]
  psadbw  xmm0, xmm2          ; per row: two 8-byte SADs, one per qword half
  paddusw xmm6, xmm0
  psadbw  xmm1, xmm3
  paddusw xmm6, xmm1
%endmacro
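
; SAD16_SSE2_SSE3 emits a complete 16x16 SAD routine; its single parameter
; selects the unaligned load used on ref: movdqu on SSE2, or lddqu on SSE3,
; which is typically cheaper for loads that straddle a cache line. Eight
; invocations of SAD_16x16_SSE2 (two rows each) cover the whole macroblock;
; the epilogue folds the two 64-bit partial sums in xmm6 and extracts the
; 16-bit result (at most 16*16*255 = 65280, so it fits in a word).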

%macro SAD16_SSE2_SSE3 1
  mov eax, [esp+ 4]           ; cur (assumed aligned)
  mov edx, [esp+ 8]           ; ref
  mov ecx, [esp+12]           ; stride

  pxor xmm6, xmm6             ; accum

  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1

  pshufd  xmm5, xmm6, 00000010b ; copy the high qword's partial sum...
  paddusw xmm6, xmm5            ; ...onto the low one
  pextrw  eax, xmm6, 0          ; final SAD -> eax
  ret
%endmacro

ALIGN 16
sad16_sse2:
  SAD16_SSE2_SSE3 movdqu
.endfunc


ALIGN 16
sad16_sse3:
  SAD16_SSE2_SSE3 lddqu
.endfunc
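
; Note: the two entry points assemble to identical code apart from the load
; instruction the macro substitutes. lddqu raises #UD on pre-SSE3 CPUs, so
; callers are presumably expected to select sad16_sse3 only after CPUID
; reports SSE3 support.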

;-----------------------------------------------------------------------------
; uint32_t dev16_sse2(const uint8_t * const cur, const uint32_t stride);
;-----------------------------------------------------------------------------
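
; For reference, a plain-C sketch of this deviation measure (dev16_c is a
; hypothetical name, not part of this file): the sum of absolute differences
; between each pixel and the block's truncated mean.
;
;   #include <stdlib.h>  /* abs */
;   uint32_t dev16_c(const uint8_t *cur, uint32_t stride)
;   {
;     uint32_t mean = 0, dev = 0;
;     for (int j = 0; j < 16; j++)
;       for (int i = 0; i < 16; i++)
;         mean += cur[j * stride + i];
;     mean /= 256;              /* 16*16 pixels; truncating, like psrlw by 8 */
;     for (int j = 0; j < 16; j++)
;       for (int i = 0; i < 16; i++)
;         dev += abs((int)cur[j * stride + i] - (int)mean);
;     return dev;
;   }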

%macro MEAN_16x16_SSE2 1 ; eax: src, ecx: stride, xmm7: zero or mean => xmm6: result
  %1      xmm0, [eax]
  %1      xmm1, [eax+ecx]
  lea     eax, [eax+2*ecx]    ; + 2*stride
  psadbw  xmm0, xmm7
  paddusw xmm6, xmm0
  psadbw  xmm1, xmm7
  paddusw xmm6, xmm1
%endmacro
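
; MEAN16_SSE2_SSE3 emits the full dev16 routine as two passes over the block:
; pass one runs MEAN_16x16_SSE2 with xmm7 = 0, so each psadbw just sums pixel
; values; the mean is then derived from that sum and broadcast to all 16
; bytes of xmm7, and pass two accumulates the SAD against the replicated
; mean, i.e. the sum of absolute deviations.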

%macro MEAN16_SSE2_SSE3 1
  mov eax, [esp+ 4]           ; src
  mov ecx, [esp+ 8]           ; stride

  pxor xmm6, xmm6             ; accum
  pxor xmm7, xmm7             ; zero

  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  mov eax, [esp+ 4]           ; src again

  pshufd  xmm7, xmm6, 10b     ; fold the two partial pixel sums
  paddusw xmm7, xmm6
  pxor    xmm6, xmm6          ; re-zero the accumulator for pass two
  psrlw   xmm7, 8             ; sum of 256 pixels / 256 => mean
  pshuflw xmm7, xmm7, 0       ; replicate the mean in the 4 low words
  packuswb xmm7, xmm7         ; words -> bytes
  pshufd   xmm7, xmm7, 00000000b ; broadcast the mean to all 16 bytes

  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  pshufd  xmm7, xmm6, 10b     ; fold the two partial deviation sums
  paddusw xmm7, xmm6
  pextrw  eax, xmm7, 0        ; final deviation -> eax
  ret
%endmacro

ALIGN 16
dev16_sse2:
  MEAN16_SSE2_SSE3 movdqu
.endfunc

ALIGN 16
dev16_sse3:
  MEAN16_SSE2_SSE3 lddqu
.endfunc
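
; As with sad16, the _sse2/_sse3 pair differs only in the unaligned load the
; macro substitutes; dev16_sse3 should likewise be selected only on CPUs whose
; CPUID feature flags report SSE3.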