; * - 3dnow 8x8 block-based halfpel interpolation -
; *
; * Copyright(C) 2001 Peter Ross <pross@xvid.org>
; *              2002-2008 Michael Militzer <michael@xvid.org>
; *              2002 Pascal Massimino <skal@planet-d.net>
; *
; * This program is free software ; you can redistribute it and/or modify
; *
; ****************************************************************************/

%include "nasm.inc"

;=============================================================================
; Read Only data
;=============================================================================

DATA

ALIGN SECTION_ALIGN
mmx_one:
  times 8 db 1

;=============================================================================
; Code
;=============================================================================

SECTION .rotext align=SECTION_ALIGN

cglobal interpolate8x8_halfpel_h_3dn
cglobal interpolate8x8_halfpel_v_3dn

;-----------------------------------------------------------------------------
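
; COPY_H_3DN_RND0 averages each source byte with its right neighbour using
; pavgusb (which rounds up) and writes two output rows per invocation,
; advancing the source pointer by two strides. COPY_H_3DN_RND1 does the same
; but subtracts the lsb of (i^j) so the average rounds down; the functions
; below jump to the RND1 path when their rounding argument is non-zero.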

%macro COPY_H_3DN_RND0 0
  movq mm0, [_EAX]
  pavgusb mm0, [_EAX+1]
  movq mm1, [_EAX+TMP1]
  pavgusb mm1, [_EAX+TMP1+1]
  lea _EAX, [_EAX+2*TMP1]
  movq [TMP0], mm0
  movq [TMP0+TMP1], mm1
%endmacro

%macro COPY_H_3DN_RND1 0
  movq mm0, [_EAX]
  movq mm1, [_EAX+TMP1]
  movq mm4, mm0
  movq mm5, mm1
  movq mm2, [_EAX+1]
  movq mm3, [_EAX+TMP1+1]
  pavgusb mm0, mm2
  pxor mm2, mm4
  pavgusb mm1, mm3
  lea _EAX, [_EAX+2*TMP1]
  pxor mm3, mm5
  pand mm2, mm7
  pand mm3, mm7
  psubb mm0, mm2
  movq [TMP0], mm0
  psubb mm1, mm3
  movq [TMP0+TMP1], mm1
%endmacro

ALIGN SECTION_ALIGN
interpolate8x8_halfpel_h_3dn:

  mov _EAX, prm4 ; rounding
  mov TMP0, prm1 ; Dst
  test _EAX, _EAX
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; stride

  jnz near .rounding1

  COPY_H_3DN_RND0
  lea TMP0, [TMP0+2*TMP1]
  COPY_H_3DN_RND0
  lea TMP0, [TMP0+2*TMP1]
  COPY_H_3DN_RND0
  lea TMP0, [TMP0+2*TMP1]
  COPY_H_3DN_RND0
  ret

.rounding1:
  ; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
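  ; example: for i=3, j=6 pavgusb gives (3+6+1)/2 = 5, while the truncated
  ; average is (3+6)/2 = 4; (i^j)&1 = (3^6)&1 = 1, and 5-1 = 4.
  ; when i+j is even the lsb of i^j is 0, so nothing is subtracted.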
  movq mm7, [mmx_one]
  COPY_H_3DN_RND1
  lea TMP0, [TMP0+2*TMP1]
  COPY_H_3DN_RND1
  lea TMP0, [TMP0+2*TMP1]
  COPY_H_3DN_RND1
  lea TMP0, [TMP0+2*TMP1]
  COPY_H_3DN_RND1
  ret
ENDFUNC

;-----------------------------------------------------------------------------

%macro COPY_V_3DN_RND0 0
  movq mm0, [_EAX]
  movq mm1, [_EAX+TMP1]
  pavgusb mm0, mm1
  pavgusb mm1, [_EAX+2*TMP1]
  lea _EAX, [_EAX+2*TMP1]
  movq [TMP0], mm0
  movq [TMP0+TMP1], mm1
%endmacro

%macro COPY_V_3DN_RND1 0
  movq mm0, mm2
  movq mm1, [_EAX]
  movq mm2, [_EAX+TMP1]
  lea _EAX, [_EAX+2*TMP1]
  movq mm4, mm0
  movq mm5, mm1
  pavgusb mm0, mm1
  pand mm4, mm7         ; lsb's of (i^j)...
  pand mm5, mm7         ; lsb's of (i^j)...
  psubb mm0, mm4        ; ...are subtracted from the result of pavgusb
  movq [TMP0], mm0
  psubb mm1, mm5        ; ...are subtracted from the result of pavgusb
  movq [TMP0+TMP1], mm1
%endmacro

ALIGN SECTION_ALIGN
interpolate8x8_halfpel_v_3dn:

  mov _EAX, prm4 ; rounding
  mov TMP0, prm1 ; Dst
  test _EAX, _EAX
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; stride

  ; we process 2 lines at a time

  jnz near .rounding1

  COPY_V_3DN_RND0
  lea TMP0, [TMP0+2*TMP1]
  COPY_V_3DN_RND0
  lea TMP0, [TMP0+2*TMP1]
  COPY_V_3DN_RND0
  lea TMP0, [TMP0+2*TMP1]
  COPY_V_3DN_RND0
  ret

.rounding1:
  ; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
  movq mm7, [mmx_one]
  movq mm2, [_EAX] ; loop invariant
  add _EAX, TMP1

  COPY_V_3DN_RND1
  lea TMP0, [TMP0+2*TMP1]
  COPY_V_3DN_RND1
  lea TMP0, [TMP0+2*TMP1]
  COPY_V_3DN_RND1
  lea TMP0, [TMP0+2*TMP1]
  COPY_V_3DN_RND1
  ret
ENDFUNC

; Moreover, we process 2 lines at a time, for better overlapping (~15% faster).

%macro COPY_HV_3DN_RND0 0
  lea _EAX, [_EAX+TMP1]

  movq mm0, [_EAX]
  movq mm1, [_EAX+1]

  movq mm6, mm0
  pavgusb mm0, mm1      ; mm0=(j+k+1)/2. preserved for next step
  lea _EAX, [_EAX+TMP1]
  pxor mm1, mm6         ; mm1=(j^k). preserved for next step

  por mm3, mm1          ; ij |= jk
  pand mm3, mm7         ; mask lsb
  psubb mm2, mm3        ; apply.

  movq [TMP0], mm2

  movq mm2, [_EAX]
  movq mm3, [_EAX+1]
  movq mm6, mm2
  pavgusb mm2, mm3      ; preserved for next iteration
  lea TMP0, [TMP0+TMP1]
  pxor mm3, mm6         ; preserved for next iteration

  por mm1, mm3
  pand mm1, mm7
  psubb mm0, mm1

  movq [TMP0], mm0
%endmacro

%macro COPY_HV_3DN_RND1 0
  lea _EAX, [_EAX+TMP1]

  movq mm0, [_EAX]
  movq mm1, [_EAX+1]

  movq mm6, mm0
  pavgusb mm0, mm1      ; mm0=(j+k+1)/2. preserved for next step
  lea _EAX, [_EAX+TMP1]
  pxor mm1, mm6         ; mm1=(j^k). preserved for next step

  pand mm3, mm1
  pand mm3, mm7
  psubb mm2, mm3

  movq [TMP0], mm2

  movq mm2, [_EAX]
  movq mm3, [_EAX+1]
  movq mm6, mm2
  pavgusb mm2, mm3      ; preserved for next iteration
  lea TMP0, [TMP0+TMP1]
  pxor mm3, mm6         ; preserved for next iteration

  pand mm1, mm3
  pand mm1, mm7
  psubb mm0, mm1

  movq [TMP0], mm0
%endmacro

ALIGN SECTION_ALIGN
interpolate8x8_halfpel_hv_3dn:
  mov _EAX, prm4 ; rounding
  mov TMP0, prm1 ; Dst
  test _EAX, _EAX
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; stride

  movq mm7, [mmx_one]

  ; loop invariants: mm2=(i+j+1)/2 and mm3 = i^j
  movq mm2, [_EAX]
  movq mm3, [_EAX+1]
  movq mm6, mm2
  pavgusb mm2, mm3
  pxor mm3, mm6   ; mm2/mm3 ready
  jnz near .rounding1

  COPY_HV_3DN_RND0
  add TMP0, TMP1
  COPY_HV_3DN_RND0
  add TMP0, TMP1
  COPY_HV_3DN_RND0
  add TMP0, TMP1
  COPY_HV_3DN_RND0
  ret

.rounding1:
  COPY_HV_3DN_RND1
  add TMP0, TMP1
  COPY_HV_3DN_RND1
  add TMP0, TMP1
  COPY_HV_3DN_RND1
  add TMP0, TMP1
  COPY_HV_3DN_RND1
  ret
ENDFUNC

;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
interpolate8x4_halfpel_h_3dn:

  mov _EAX, prm4 ; rounding
  mov TMP0, prm1 ; Dst
  test _EAX, _EAX
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; stride

  jnz near .rounding1

  COPY_H_3DN_RND0
  lea TMP0, [TMP0+2*TMP1]
  COPY_H_3DN_RND0
  ret

.rounding1:
  ; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
  movq mm7, [mmx_one]
  COPY_H_3DN_RND1
  lea TMP0, [TMP0+2*TMP1]
  COPY_H_3DN_RND1
  ret
ENDFUNC

;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
interpolate8x4_halfpel_v_3dn:

  mov _EAX, prm4 ; rounding
  mov TMP0, prm1 ; Dst
  test _EAX, _EAX
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; stride

  ; we process 2 lines at a time

  jnz near .rounding1

  COPY_V_3DN_RND0
  lea TMP0, [TMP0+2*TMP1]
  COPY_V_3DN_RND0
  ret

.rounding1:
  ; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
  movq mm7, [mmx_one]
  movq mm2, [_EAX] ; loop invariant
  add _EAX, TMP1

  COPY_V_3DN_RND1
  lea TMP0, [TMP0+2*TMP1]
  COPY_V_3DN_RND1
  ret
ENDFUNC

; (i+j+k+l+0)/4 = (s+t+1)/2 - (ij|kl)|st
; with s=(i+j+1)/2, t=(k+l+1)/2, ij = i^j, kl = k^l, st = s^t.
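; quick check with i=1, j=2, k=3, l=4: s=(1+2+1)/2=2, t=(3+4+1)/2=4, and
; (s+t+1)/2=3; the lsb of (i^j)|(k^l)|(s^t) = 3|7|6 is 1, and 3-1 = 2,
; which matches (1+2+3+4)/4 = 2. The pand with mm7 (mmx_one) inside the
; COPY_HV macros keeps only that lsb before the psubb correction.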

ALIGN SECTION_ALIGN
interpolate8x4_halfpel_hv_3dn:
  mov _EAX, prm4 ; rounding
  mov TMP0, prm1 ; Dst
  test _EAX, _EAX
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; stride

  movq mm7, [mmx_one]

  ; loop invariants: mm2=(i+j+1)/2 and mm3 = i^j
  movq mm2, [_EAX]
  movq mm3, [_EAX+1]
  movq mm6, mm2
  pavgusb mm2, mm3
  pxor mm3, mm6   ; mm2/mm3 ready
  jnz near .rounding1

  COPY_HV_3DN_RND0
  add TMP0, TMP1
  COPY_HV_3DN_RND0
  ret

.rounding1:
  COPY_HV_3DN_RND1
  add TMP0, TMP1
  COPY_HV_3DN_RND1
  ret
ENDFUNC