;/****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - K7 optimized SAD operators -
; *
; *  Copyright(C) 2001 Peter Ross <pross@xvid.org>
; *               2001 Michael Militzer <isibaar@xvid.org>
; *               2002 Pascal Massimino <skal@planet-d.net>
; *
; *  This program is free software; you can redistribute it and/or modify it
; *  under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id: sad_xmm.asm,v 1.6.2.1 2003-10-28 22:23:03 edgomez Exp $
; *
; ***************************************************************************/

;/****************************************************************************
; *
; * History:
; *
; * 23.07.2002  sad8bi_xmm; <pross@xvid.org>
; * 04.06.2002  rewrote some funcs (XMM mainly) -Skal-
; * 17.11.2001  bugfix and small improvement for dev16_xmm,
; *             removed terminate early in sad16_xmm (Isibaar)
; * 12.11.2001  initial version; (c)2001 peter ross <pross@cs.rmit.edu.au>
; *
; ***************************************************************************/

BITS 32

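; Symbol export helper: declares %1 as a global symbol; when PREFIX is
; defined, an underscore-prefixed name is exported instead (the usual XviD
; cglobal idiom is assumed for the %ifdef body).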
%macro cglobal 1
  %ifdef PREFIX
    global _%1
    %define %1 _%1
  %else
    global %1
  %endif
%endmacro

;=============================================================================
; Read only data
;=============================================================================

SECTION .rodata

ALIGN 16
mmx_one: times 4 dw 1

;=============================================================================
; Helper macros
;=============================================================================

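; Accumulate the SAD of one 16-pixel row: psadbw yields the sum of absolute
; byte differences per 8-byte half, collected separately in mm5 and mm6;
; eax (cur) and edx (ref) are advanced by the stride held in ecx.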
|
%macro SAD_16x16_SSE 0
  movq mm0, [eax]
  psadbw mm0, [edx]
  movq mm1, [eax+8]
  add eax, ecx
  psadbw mm1, [edx+8]
  paddusw mm5, mm0
  add edx, ecx
  paddusw mm6, mm1
%endmacro

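; Accumulate the SAD of two 8-pixel rows per invocation; ecx holds the
; stride and ebx is expected to hold 2*stride so that eax and edx advance
; by two rows each time.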
|
%macro SAD_8x8_SSE 0
  movq mm0, [eax]
  movq mm1, [eax+ecx]
  psadbw mm0, [edx]
  psadbw mm1, [edx+ecx]
  add eax, ebx
  add edx, ebx
  paddusw mm5, mm0
  paddusw mm6, mm1
%endmacro

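; Interpolated SAD, one 16-pixel row: the two references (edx, ebx) are
; averaged with pavgb before psadbw against the current row in eax; all
; three pointers advance by the stride in ecx.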
|
%macro SADBI_16x16_SSE 0
  movq mm0, [eax]
  movq mm1, [eax+8]
  movq mm2, [edx]
  movq mm3, [edx+8]
  pavgb mm2, [ebx]
  add edx, ecx
  pavgb mm3, [ebx+8]
  add ebx, ecx
  psadbw mm0, mm2
  add eax, ecx
  psadbw mm1, mm3
  paddusw mm5, mm0
  paddusw mm6, mm1
%endmacro

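; Interpolated SAD, two 8-pixel rows per invocation: rows [0] and [stride]
; of the pavgb-averaged references are compared with the current block,
; then eax, edx and ebx advance by 2*stride via lea.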
|
%macro SADBI_8x8_XMM 0
  movq mm0, [eax]
  movq mm1, [eax+ecx]
  movq mm2, [edx]
  movq mm3, [edx+ecx]
  pavgb mm2, [ebx]
  lea edx, [edx+2*ecx]
  pavgb mm3, [ebx+ecx]
  lea ebx, [ebx+2*ecx]
  psadbw mm0, mm2
  lea eax, [eax+2*ecx]
  psadbw mm1, mm3
  paddusw mm5, mm0
  paddusw mm6, mm1
%endmacro

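; Sum the bytes of one 16-pixel row: psadbw against mm7 (cleared by the
; caller) reduces each 8-byte half to its byte sum, accumulated in mm5/mm6
; to build the block mean for dev16_xmm.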
|
%macro MEAN_16x16_SSE 0
  movq mm0, [eax]
  movq mm1, [eax+8]
  psadbw mm0, mm7
  psadbw mm1, mm7
  add eax, ecx
  paddw mm5, mm0
  paddw mm6, mm1
%endmacro

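; Sum of absolute differences of one 16-pixel row against mm4, which is
; expected to hold the block mean replicated in every byte (second pass of
; dev16_xmm).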
|
%macro ABS_16x16_SSE 0
  movq mm0, [eax]
  movq mm1, [eax+8]
  psadbw mm0, mm4
  psadbw mm1, mm4
  lea eax, [eax+ecx]
  paddw mm5, mm0
  paddw mm6, mm1
%endmacro

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal sad16_xmm
cglobal sad8_xmm
cglobal sad16bi_xmm
cglobal sad8bi_xmm
cglobal dev16_xmm
cglobal sad16v_xmm

;-----------------------------------------------------------------------------
;
; uint32_t sad16_xmm(const uint8_t * const cur,
;                    const uint8_t * const ref,
;                    const uint32_t stride,
;                    const uint32_t best_sad);
;
;-----------------------------------------------------------------------------
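; Plain 16x16 SAD between cur and ref. The best_sad argument is part of the
; interface only; the early termination it once enabled was removed (see the
; history note above).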
|
|
ALIGN 16
sad16_xmm:

  mov eax, [esp+ 4] ; Src1
  ret

;-----------------------------------------------------------------------------
;
; uint32_t sad8_xmm(const uint8_t * const cur,
;                   const uint8_t * const ref,
;                   const uint32_t stride);
;
;-----------------------------------------------------------------------------
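; Plain 8x8 SAD between cur and ref (two rows per SAD_8x8_SSE step).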
|
|
|
|
ALIGN 16
sad8_xmm:

  mov eax, [esp+ 4] ; Src1
  ret

;-----------------------------------------------------------------------------
;
; uint32_t sad16bi_xmm(const uint8_t * const cur,
;                      const uint8_t * const ref1,
;                      const uint8_t * const ref2,
;                      const uint32_t stride);
;
;-----------------------------------------------------------------------------
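; 16x16 SAD of cur against the pavgb average of ref1 and ref2, as used for
; interpolated (bi-directional) motion search.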
|
|
|
|
ALIGN 16
sad16bi_xmm:
  push ebx
  mov eax, [esp+4+ 4] ; Src
  pop ebx
  ret

;-----------------------------------------------------------------------------
;
; uint32_t sad8bi_xmm(const uint8_t * const cur,
;                     const uint8_t * const ref1,
;                     const uint8_t * const ref2,
;                     const uint32_t stride);
;
;-----------------------------------------------------------------------------

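; 8x8 variant of sad16bi_xmm: SAD of cur against the average of ref1 and ref2.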
ALIGN 16
sad8bi_xmm:
  push ebx
  mov eax, [esp+4+ 4] ; Src
  ret

;-----------------------------------------------------------------------------
;
; uint32_t dev16_xmm(const uint8_t * const cur,
;                    const uint32_t stride);
;
;-----------------------------------------------------------------------------

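; Deviation of a 16x16 block: the byte sum is built with MEAN_16x16_SSE,
; then ABS_16x16_SSE accumulates the absolute differences of every pixel
; from that mean.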
ALIGN 16
dev16_xmm:

  mov eax, [esp+ 4] ; Src
  movd eax, mm6
  ret

;-----------------------------------------------------------------------------
;
; int sad16v_xmm(const uint8_t * const cur,
;                const uint8_t * const ref,
;                const uint32_t stride,
;                int* sad8);
;
;-----------------------------------------------------------------------------

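; Returns the 16x16 SAD in eax and also writes the SADs of the 8x8
; sub-blocks to the sad8 array (the partial sums stored via movd below).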
|
ALIGN 16
sad16v_xmm:
  push ebx
  mov eax, [esp+4+ 4] ; Src1

  pxor mm5, mm5 ; accum1
  pxor mm6, mm6 ; accum2
  pxor mm7, mm7 ; total

  SAD_16x16_SSE
  SAD_16x16_SSE
  SAD_16x16_SSE
  SAD_16x16_SSE
  SAD_16x16_SSE
  SAD_16x16_SSE

  paddusw mm7, mm5
  paddusw mm7, mm6
  movd [ebx], mm5

  pxor mm5, mm5 ; accum1
  pxor mm6, mm6 ; accum2

  SAD_16x16_SSE
  SAD_16x16_SSE
  SAD_16x16_SSE
  SAD_16x16_SSE
  SAD_16x16_SSE
  SAD_16x16_SSE

  paddusw mm7, mm5
  paddusw mm7, mm6
  movd [ebx+8], mm5

  movd eax, mm7
  pop ebx
  ret