; * along with this program; if not, write to the Free Software
; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id: sad_sse2.asm,v 1.16 2008-11-26 01:04:34 Isibaar Exp $
; *
; ***************************************************************************/

; nasm.inc supplies the portability layer used throughout this file:
; cglobal/ENDFUNC, DATA, SECTION_ALIGN, and the prm*/TMP*/_EAX
; argument/register abstraction macros.
; NOTE(review): reconstructed from the newer side of the interleaved diff;
; the old revision carried "BITS 32" plus a local cglobal macro instead,
; both superseded by nasm.inc -- verify against xvidcore v1.16.
%include "nasm.inc"
;=============================================================================
; Read only data
;=============================================================================

DATA                                    ; nasm.inc section macro (replaces the
                                        ; old FORMAT_COFF %ifdef on .rodata)

ALIGN SECTION_ALIGN
zero    times 4 dd 0                    ; one 16-byte all-zero constant
|
|
;=============================================================================
; Code
;=============================================================================

SECTION .rotext align=SECTION_ALIGN

cglobal sad16_sse2
cglobal dev16_sse2
cglobal sad16_sse3
cglobal dev16_sse3
;-----------------------------------------------------------------------------
; uint32_t sad16_sse2 (const uint8_t * const cur, <- assumed aligned!
;                      const uint8_t * const ref,
;                      const uint32_t stride);
;-----------------------------------------------------------------------------

; Accumulate the SAD of two 16-pixel rows into xmm6.
; %1 = the 128-bit unaligned load used for the ref rows
;      (movdqu for the SSE2 build, lddqu for the SSE3 build);
;      cur rows are loaded with movdqa (assumed 16-byte aligned).
; In/out: _EAX = cur, TMP1 = ref, TMP0 = stride;
;         both pointers advance by 2*stride each expansion.
%macro SAD_16x16_SSE2 1
  %1      xmm0, [TMP1]                  ; ref row 0 (unaligned)
  %1      xmm1, [TMP1+TMP0]             ; ref row 1
  lea     TMP1, [TMP1+2*TMP0]           ; ref += 2*stride
  movdqa  xmm2, [_EAX]                  ; cur row 0 (aligned)
  movdqa  xmm3, [_EAX+TMP0]             ; cur row 1
  lea     _EAX, [_EAX+2*TMP0]           ; cur += 2*stride
  psadbw  xmm0, xmm2                    ; |ref0-cur0|, summed per 8-byte half
  paddusw xmm6, xmm0
  psadbw  xmm1, xmm3                    ; |ref1-cur1|
  paddusw xmm6, xmm1
%endmacro
; Shared body for sad16_sse2 / sad16_sse3.
; %1 = unaligned 128-bit load instruction (movdqu or lddqu).
; Args (via nasm.inc prm* macros): prm1 = cur (aligned), prm2 = ref,
; prm3 = stride.  Returns the 16x16 SAD in eax.
%macro SAD16_SSE2_SSE3 1
  mov     _EAX, prm1                    ; cur (assumed aligned)
  mov     TMP1, prm2                    ; ref
  mov     TMP0, prm3                    ; stride

  pxor    xmm6, xmm6                    ; accum

  SAD_16x16_SSE2 %1                     ; 8 expansions x 2 rows = 16 rows
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1
  SAD_16x16_SSE2 %1

  pshufd  xmm5, xmm6, 00000010b         ; bring high-qword partial sum down
  paddusw xmm6, xmm5
  pextrw  eax, xmm6, 0                  ; return total SAD
  ret
%endmacro


ALIGN SECTION_ALIGN
sad16_sse2:
  SAD16_SSE2_SSE3 movdqu
ENDFUNC


ALIGN SECTION_ALIGN
sad16_sse3:
  SAD16_SSE2_SSE3 lddqu                 ; lddqu: SSE3 cache-line-split friendly load
ENDFUNC
|
|
;-----------------------------------------------------------------------------
; uint32_t dev16_sse2(const uint8_t * const cur, const uint32_t stride);
;-----------------------------------------------------------------------------

; Accumulate the SAD of two 16-pixel rows against xmm7 into xmm6.
; xmm7 holds either zero (first pass: plain pixel sum) or the replicated
; block mean (second pass: sum of |pixel - mean| = deviation).
; %1 = load instruction (movdqu/lddqu).
; _EAX: src, TMP0: stride, xmm7: zero or mean => xmm6: result
%macro MEAN_16x16_SSE2 1
  %1      xmm0, [_EAX]
  %1      xmm1, [_EAX+TMP0]
  lea     _EAX, [_EAX+2*TMP0]           ; + 2*stride
  psadbw  xmm0, xmm7
  paddusw xmm6, xmm0
  psadbw  xmm1, xmm7
  ; NOTE(review): the diff view elides original line 116 here (jump 115->117);
  ; restored by symmetry with SAD_16x16_SSE2 -- verify against v1.16.
  paddusw xmm6, xmm1
%endmacro
|
|
; Shared body for dev16_sse2 / dev16_sse3: mean absolute deviation of a
; 16x16 block.  Pass 1 sums all 256 pixels (xmm7 = zero), the sum is
; scaled to a per-pixel mean and replicated into all 16 bytes of xmm7,
; then pass 2 re-reads the block and sums |pixel - mean|.
; %1 = load instruction (movdqu or lddqu).
; Args: prm1 = src, prm2 = stride.  Returns the deviation in eax.
%macro MEAN16_SSE2_SSE3 1
  mov     _EAX, prm1                    ; src
  mov     TMP0, prm2                    ; stride

  pxor    xmm6, xmm6                    ; accum
  pxor    xmm7, xmm7                    ; zero

  ; pass 1: total pixel sum (SAD against zero)
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  mov     _EAX, prm1                    ; src again

  pshufd  xmm7, xmm6, 10b               ; combine the two qword partial sums
  paddusw xmm7, xmm6
  ; NOTE(review): the diff view elides original lines 141-143 here
  ; (jump 140->144).  They must re-zero the accumulator and scale the
  ; 256-pixel sum down to a per-word mean before it is packed/replicated
  ; below; reconstructed from the reference implementation -- verify
  ; against xvidcore v1.16.
  pxor    xmm6, xmm6                    ; zero accum for pass 2
  psrlw   xmm7, 8                       ; sum/256 => mean
  pshuflw xmm7, xmm7, 0                 ; replicate mean in the low 4 words
  packuswb xmm7, xmm7                   ; mean in low 8 bytes
  pshufd  xmm7, xmm7, 00000000b         ; ... in all 16 bytes

  ; pass 2: sum |pixel - mean|
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1
  MEAN_16x16_SSE2 %1

  pshufd  xmm7, xmm6, 10b               ; combine qword partials
  paddusw xmm7, xmm6
  pextrw  eax, xmm7, 0                  ; return deviation
  ret
%endmacro


ALIGN SECTION_ALIGN
dev16_sse2:
  MEAN16_SSE2_SSE3 movdqu
ENDFUNC


ALIGN SECTION_ALIGN
dev16_sse3:
  MEAN16_SSE2_SSE3 lddqu
ENDFUNC
|
|
; Mark the stack as non-executable on ELF targets (suppresses the
; linker's executable-stack default for objects lacking this note).
%ifidn __OUTPUT_FORMAT__,elf
section ".note.GNU-stack" noalloc noexec nowrite progbits
%endif
|
|