--- sad_xmm.asm	2008-11-11 20:46:24	1.12
+++ sad_xmm.asm	2008-11-26 01:04:34	1.13
@@ ... @@
 ; * - K7 optimized SAD operators -
 ; *
 ; * Copyright(C) 2001 Peter Ross <pross@xvid.org>
-; *              2001 Michael Militzer <isibaar@xvid.org>
+; *         2001-2008 Michael Militzer <michael@xvid.org>
 ; *              2002 Pascal Massimino <skal@planet-d.net>
 ; *
 ; * This program is free software; you can redistribute it and/or modify it
@@ ... @@
 ; * along with this program; if not, write to the Free Software
 ; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ; *
-; * $Id: sad_xmm.asm,v 1.12 2008-11-11 20:46:24 Isibaar Exp $
+; * $Id: sad_xmm.asm,v 1.13 2008-11-26 01:04:34 Isibaar Exp $
 ; *
 ; ***************************************************************************/

-BITS 32
-
-%macro cglobal 1
-  %ifdef PREFIX
-    %ifdef MARK_FUNCS
-      global _%1:function %1.endfunc-%1
-      %define %1 _%1:function %1.endfunc-%1
-      %define ENDFUNC .endfunc
-    %else
-      global _%1
-      %define %1 _%1
-      %define ENDFUNC
-    %endif
-  %else
-    %ifdef MARK_FUNCS
-      global %1:function %1.endfunc-%1
-      %define ENDFUNC .endfunc
-    %else
-      global %1
-      %define ENDFUNC
-    %endif
-  %endif
-%endmacro
+%include "nasm.inc"
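The hand-rolled cglobal wrapper and the hard-coded BITS 32 are replaced by a single include of nasm.inc, xvid's shared abstraction layer. That header supplies the portability macros used throughout the rest of this diff: _EAX/_EBX/_ESP name the natural-width registers (eax or rax, and so on), TMP0/TMP1 name scratch registers, and prm1..prmN fetch the Nth C argument, which lives on the stack on ia32 but in a register on x86_64. The C-level contracts of the exported routines do not change; for reference they are approximately the following. This is a hedged sketch pieced together from the prototype fragments in this file's own comment banners (such as "; int* sad8);" below); see xvidcore's sad.h for the authoritative declarations.

    #include <stdint.h>

    uint32_t sad16_xmm(const uint8_t *cur, const uint8_t *ref,
                       uint32_t stride, uint32_t best_sad);
    uint32_t sad8_xmm(const uint8_t *cur, const uint8_t *ref,
                      uint32_t stride);
    uint32_t sad16bi_xmm(const uint8_t *cur, const uint8_t *ref1,
                         const uint8_t *ref2, uint32_t stride);
    uint32_t sad8bi_xmm(const uint8_t *cur, const uint8_t *ref1,
                        const uint8_t *ref2, uint32_t stride);
    uint32_t dev16_xmm(const uint8_t *cur, uint32_t stride);
    uint32_t sad16v_xmm(const uint8_t *cur, const uint8_t *ref,
                        uint32_t stride, int *sad8);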

 ;=============================================================================
 ; Read only data
 ;=============================================================================

-%ifdef FORMAT_COFF
-SECTION .rodata
-%else
-SECTION .rodata align=16
-%endif
+DATA

-ALIGN 16
+ALIGN SECTION_ALIGN
 mmx_one: times 4 dw 1

 ;=============================================================================
@@ ... @@
 ;=============================================================================

 %macro SAD_16x16_SSE 0
-  movq mm0, [eax]
-  psadbw mm0, [edx]
-  movq mm1, [eax+8]
-  add eax, ecx
-  psadbw mm1, [edx+8]
+  movq mm0, [_EAX]
+  psadbw mm0, [TMP1]
+  movq mm1, [_EAX+8]
+  add _EAX, TMP0
+  psadbw mm1, [TMP1+8]
   paddusw mm5, mm0
-  add edx, ecx
+  add TMP1, TMP0
   paddusw mm6, mm1
 %endmacro
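Only register names change here; the arithmetic is untouched. Each SAD_16x16_SSE invocation processes one 16-pixel row: psadbw sums the absolute byte differences of an 8-byte group into a single word, so two psadbw per row feed the running totals in mm5 and mm6. The quantity the unrolled loop builds up is the plain 16x16 sum of absolute differences; a minimal C sketch (function name illustrative, not part of xvid):

    #include <stdint.h>
    #include <stdlib.h>

    /* Reference for what sad16_xmm computes: the sum of absolute
     * differences over a 16x16 block, both pointers advancing by
     * the same stride, as in the asm above. */
    static uint32_t sad16_ref(const uint8_t *cur, const uint8_t *ref,
                              uint32_t stride)
    {
        uint32_t sad = 0;
        for (int y = 0; y < 16; y++) {
            for (int x = 0; x < 16; x++)
                sad += abs((int)cur[x] - (int)ref[x]);
            cur += stride;
            ref += stride;
        }
        return sad;
    }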

 %macro SAD_8x8_SSE 0
-  movq mm0, [eax]
-  movq mm1, [eax+ecx]
-  psadbw mm0, [edx]
-  psadbw mm1, [edx+ecx]
-  add eax, ebx
-  add edx, ebx
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+TMP0]
+  psadbw mm0, [TMP1]
+  psadbw mm1, [TMP1+TMP0]
+  add _EAX, _EBX
+  add TMP1, _EBX
   paddusw mm5, mm0
   paddusw mm6, mm1
 %endmacro

 %macro SADBI_16x16_SSE 0
-  movq mm0, [eax]
-  movq mm1, [eax+8]
-  movq mm2, [edx]
-  movq mm3, [edx+8]
-  pavgb mm2, [ebx]
-  add edx, ecx
-  pavgb mm3, [ebx+8]
-  add ebx, ecx
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+8]
+  movq mm2, [TMP1]
+  movq mm3, [TMP1+8]
+  pavgb mm2, [_EBX]
+  add TMP1, TMP0
+  pavgb mm3, [_EBX+8]
+  add _EBX, TMP0
   psadbw mm0, mm2
-  add eax, ecx
+  add _EAX, TMP0
   psadbw mm1, mm3
   paddusw mm5, mm0
   paddusw mm6, mm1
 %endmacro

 %macro SADBI_8x8_XMM 0
-  movq mm0, [eax]
-  movq mm1, [eax+ecx]
-  movq mm2, [edx]
-  movq mm3, [edx+ecx]
-  pavgb mm2, [ebx]
-  lea edx, [edx+2*ecx]
-  pavgb mm3, [ebx+ecx]
-  lea ebx, [ebx+2*ecx]
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+TMP0]
+  movq mm2, [TMP1]
+  movq mm3, [TMP1+TMP0]
+  pavgb mm2, [_EBX]
+  lea TMP1, [TMP1+2*TMP0]
+  pavgb mm3, [_EBX+TMP0]
+  lea _EBX, [_EBX+2*TMP0]
   psadbw mm0, mm2
-  lea eax, [eax+2*ecx]
+  lea _EAX, [_EAX+2*TMP0]
   psadbw mm1, mm3
   paddusw mm5, mm0
   paddusw mm6, mm1
 %endmacro
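The two SADBI macros implement the bidirectional variants: the source block is matched against the rounding average of two reference blocks, which pavgb computes per byte as (a + b + 1) >> 1. A hedged C sketch of the 16x16 case (illustrative name, not xvid's actual C reference code):

    #include <stdint.h>
    #include <stdlib.h>

    static uint32_t sad16bi_ref(const uint8_t *cur, const uint8_t *ref1,
                                const uint8_t *ref2, uint32_t stride)
    {
        uint32_t sad = 0;
        for (int y = 0; y < 16; y++) {
            for (int x = 0; x < 16; x++) {
                int avg = (ref1[x] + ref2[x] + 1) >> 1;  /* pavgb rounding */
                sad += abs((int)cur[x] - avg);
            }
            cur  += stride;
            ref1 += stride;
            ref2 += stride;
        }
        return sad;
    }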

 %macro MEAN_16x16_SSE 0
-  movq mm0, [eax]
-  movq mm1, [eax+8]
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+8]
   psadbw mm0, mm7
   psadbw mm1, mm7
-  add eax, ecx
+  add _EAX, TMP0
   paddw mm5, mm0
   paddw mm6, mm1
 %endmacro

 %macro ABS_16x16_SSE 0
-  movq mm0, [eax]
-  movq mm1, [eax+8]
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+8]
   psadbw mm0, mm4
   psadbw mm1, mm4
-  lea eax, [eax+ecx]
+  lea _EAX, [_EAX+TMP0]
   paddw mm5, mm0
   paddw mm6, mm1
 %endmacro
@@ ... @@
 ; Code
 ;=============================================================================

-SECTION .text
+SECTION .rotext align=SECTION_ALIGN

 cglobal sad16_xmm
 cglobal sad8_xmm
@@ ... @@
 ;
 ;-----------------------------------------------------------------------------

-ALIGN 16
+ALIGN SECTION_ALIGN
 sad16_xmm:

-  mov eax, [esp+ 4]  ; Src1
-  mov edx, [esp+ 8]  ; Src2
-  mov ecx, [esp+12]  ; Stride
+  mov _EAX, prm1     ; Src1
+  mov TMP1, prm2     ; Src2
+  mov TMP0, prm3     ; Stride

   pxor mm5, mm5      ; accum1
   pxor mm6, mm6      ; accum2
@@ ... @@
 ;
 ;-----------------------------------------------------------------------------

-ALIGN 16
+ALIGN SECTION_ALIGN
 sad8_xmm:

-  mov eax, [esp+ 4]  ; Src1
-  mov edx, [esp+ 8]  ; Src2
-  mov ecx, [esp+12]  ; Stride
-  push ebx
-  lea ebx, [ecx+ecx]
+  mov _EAX, prm1     ; Src1
+  mov TMP1, prm2     ; Src2
+  mov TMP0, prm3     ; Stride
+  push _EBX
+  lea _EBX, [TMP0+TMP0]

   pxor mm5, mm5      ; accum1
   pxor mm6, mm6      ; accum2
@@ ... @@
   SAD_8x8_SSE
   SAD_8x8_SSE

-  movq mm0, [eax]
-  movq mm1, [eax+ecx]
-  psadbw mm0, [edx]
-  psadbw mm1, [edx+ecx]
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+TMP0]
+  psadbw mm0, [TMP1]
+  psadbw mm1, [TMP1+TMP0]

-  pop ebx
+  pop _EBX

   paddusw mm5,mm0
   paddusw mm6,mm1
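sad8_xmm walks the 8x8 block two rows per SAD_8x8_SSE invocation, stepping both pointers by 2*stride (kept in _EBX); the last two rows are handled inline so _EBX can be popped before the final accumulate. In C terms, roughly (sketch only, name illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    static uint32_t sad8_ref(const uint8_t *cur, const uint8_t *ref,
                             uint32_t stride)
    {
        uint32_t sad = 0;
        for (int y = 0; y < 8; y += 2) {  /* two rows per step, like the asm */
            for (int x = 0; x < 8; x++) {
                sad += abs((int)cur[x] - (int)ref[x]);
                sad += abs((int)cur[stride + x] - (int)ref[stride + x]);
            }
            cur += 2 * stride;
            ref += 2 * stride;
        }
        return sad;
    }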
@@ ... @@
 ;
 ;-----------------------------------------------------------------------------

-ALIGN 16
+ALIGN SECTION_ALIGN
 sad16bi_xmm:
-  push ebx
-  mov eax, [esp+4+ 4]  ; Src
-  mov edx, [esp+4+ 8]  ; Ref1
-  mov ebx, [esp+4+12]  ; Ref2
-  mov ecx, [esp+4+16]  ; Stride
+  mov _EAX, prm1       ; Src
+  mov TMP1, prm2       ; Ref1
+  mov TMP0, prm4       ; Stride
+
+  push _EBX
+%ifdef ARCH_IS_X86_64
+  mov _EBX, prm3
+%else
+  mov _EBX, [_ESP+4+12] ; Ref2
+%endif

   pxor mm5, mm5        ; accum1
   pxor mm6, mm6        ; accum2

@@ ... @@

   paddusw mm6,mm5
   movd eax, mm6
-  pop ebx
+  pop _EBX
   ret
 ENDFUNC

@@ ... @@
 ;
 ;-----------------------------------------------------------------------------

-ALIGN 16
+ALIGN SECTION_ALIGN
 sad8bi_xmm:
-  push ebx
-  mov eax, [esp+4+ 4]  ; Src
-  mov edx, [esp+4+ 8]  ; Ref1
-  mov ebx, [esp+4+12]  ; Ref2
-  mov ecx, [esp+4+16]  ; Stride
+  mov _EAX, prm1       ; Src
+  mov TMP1, prm2       ; Ref1
+  mov TMP0, prm4       ; Stride
+
+  push _EBX
+%ifdef ARCH_IS_X86_64
+  mov _EBX, prm3
+%else
+  mov _EBX, [_ESP+4+12] ; Ref2
+%endif

   pxor mm5, mm5        ; accum1
   pxor mm6, mm6        ; accum2
@@ ... @@

   paddusw mm6,mm5
   movd eax, mm6
-  pop ebx
+  pop _EBX
   ret
 ENDFUNC

@@ ... @@
 ;
 ;-----------------------------------------------------------------------------

-ALIGN 16
+ALIGN SECTION_ALIGN
 dev16_xmm:

-  mov eax, [esp+ 4]  ; Src
-  mov ecx, [esp+ 8]  ; Stride
+  mov _EAX, prm1     ; Src
+  mov TMP0, prm2     ; Stride

   pxor mm7, mm7      ; zero
   pxor mm5, mm5      ; mean accums
@@ ... @@

 ; mm4 contains the mean

-  mov eax, [esp+ 4]  ; Src
+  mov _EAX, prm1     ; Src


   pxor mm5, mm5      ; sums
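dev16_xmm makes two passes over the block: the first uses psadbw against a zeroed mm7 to sum all 256 pixels and derive the block mean (broadcast into mm4 in the elided lines), the second uses ABS_16x16_SSE's psadbw against mm4 to total the absolute deviations from that mean. Approximately, in C; a sketch under the assumption that the mean is the plain truncating average, since the exact rounding lives in the elided code:

    #include <stdint.h>
    #include <stdlib.h>

    static uint32_t dev16_ref(const uint8_t *cur, uint32_t stride)
    {
        const uint8_t *p = cur;
        uint32_t mean = 0, dev = 0;

        for (int y = 0; y < 16; y++, p += stride)    /* pass 1: pixel sum */
            for (int x = 0; x < 16; x++)
                mean += p[x];
        mean /= 256;                                 /* assumed truncating mean */

        for (int y = 0; y < 16; y++, cur += stride)  /* pass 2: |pixel - mean| */
            for (int x = 0; x < 16; x++)
                dev += abs((int)cur[x] - (int)mean);
        return dev;
    }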
@@ ... @@
 ; int* sad8);
 ;-----------------------------------------------------------------------------

-ALIGN 16
+ALIGN SECTION_ALIGN
 sad16v_xmm:
-  push ebx
-  mov eax, [esp+4+ 4]  ; Src1
-  mov edx, [esp+4+ 8]  ; Src2
-  mov ecx, [esp+4+12]  ; Stride
-  mov ebx, [esp+4+16]  ; sad ptr
+  mov _EAX, prm1       ; Src1
+  mov TMP1, prm2       ; Src2
+  mov TMP0, prm3       ; Stride
+
+  push _EBX
+%ifdef ARCH_IS_X86_64
+  mov _EBX, prm4
+%else
+  mov _EBX, [_ESP+4+16] ; sad ptr
+%endif

   pxor mm5, mm5        ; accum1
   pxor mm6, mm6        ; accum2
@@ ... @@

   paddusw mm7, mm5
   paddusw mm7, mm6
-  movd [ebx], mm5
-  movd [ebx+4], mm6
+  movd [_EBX], mm5
+  movd [_EBX+4], mm6

   pxor mm5, mm5        ; accum1
   pxor mm6, mm6        ; accum2
@@ ... @@

   paddusw mm7, mm5
   paddusw mm7, mm6
-  movd [ebx+8], mm5
-  movd [ebx+12], mm6
+  movd [_EBX+8], mm5
+  movd [_EBX+12], mm6

   movd eax, mm7
-  pop ebx
+  pop _EBX
   ret
 ENDFUNC
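sad16v_xmm returns the full 16x16 SAD in eax while also writing the four 8x8 sub-block SADs through the pointer now held in _EBX, matching the movd stores above: left and right halves of the top eight rows first, then of the bottom eight. A hedged C sketch of that contract (sub-block ordering inferred from the stores; name illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    static uint32_t sad16v_ref(const uint8_t *cur, const uint8_t *ref,
                               uint32_t stride, int *sad8)
    {
        uint32_t total = 0;
        for (int i = 0; i < 4; i++) {  /* 0:TL 1:TR 2:BL 3:BR, as stored above */
            const uint8_t *c = cur + (i & 1) * 8 + (i >> 1) * 8 * stride;
            const uint8_t *r = ref + (i & 1) * 8 + (i >> 1) * 8 * stride;
            uint32_t s = 0;
            for (int y = 0; y < 8; y++, c += stride, r += stride)
                for (int x = 0; x < 8; x++)
                    s += abs((int)c[x] - (int)r[x]);
            sad8[i] = (int)s;
            total += s;
        }
        return total;
    }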