;/****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  mmx sum of absolute difference
; *
; *  Copyright(C) 2002 Peter Ross <pross@xvid.org>
; *
; *  This program is an implementation of a part of one or more MPEG-4
; *  Video tools as specified in ISO/IEC 14496-2 standard.  Those intending
; *  to use this software module in hardware or software products are
; *  advised that its use may infringe existing patents or copyrights, and
; *  any such use would be at such party's own risk.  The original
; *  developer of this software module and his/her company, and subsequent
; *  editors and their companies, will have no liability for use of this
; *  software or modifications or derivatives thereof.
; *
; *  This program is free software; you can redistribute it and/or modify
; *  it under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
; *
; ***************************************************************************/

;/****************************************************************************
; *
; *  History:
; *
; *  23.07.2002  sad[16,8]bi_xmm; <pross@xvid.org>
; *  04.06.2002  cleanup -Skal-
; *  12.11.2001  initial version; (c)2001 peter ross <pross@cs.rmit.edu.au>
; *
; ***************************************************************************/
; 32-bit (IA-32) code; NASM syntax throughout this file.
bits 32
%macro cglobal 1 |
%macro cglobal 1 |
36 |
|
%ifdef PREFIX |
37 |
global _%1 |
global _%1 |
38 |
%define %1 _%1 |
%define %1 _%1 |
39 |
%else |
%else |
;===========================================================================
;
; uint32_t sad16_mmx(const uint8_t * const cur,
;                    const uint8_t * const ref,
;                    const uint32_t stride,
;                    const uint32_t best_sad);
;
; (early termination ignored; it slows this down)
;
;===========================================================================
;===========================================================================
;
; uint32_t sad16bi_mmx(const uint8_t * const cur,
;                      const uint8_t * const ref1,
;                      const uint8_t * const ref2,
;                      const uint32_t stride);
;
;===========================================================================
;---------------------------------------------------------------------------
; SADBI_16x16_MMX( int_ptr_offset, bool_increment_ptr )
;
; Processes one 8-pixel chunk of a row for the bi-directional SAD:
;   mm6 += sum( |src - ((ref1 + ref2 + 1) >> 1)| )   over 8 pixels
;
; Register contract (set up by the callers below):
;   eax = src pointer, edx = ref1 pointer, ebx = ref2 pointer
;   ecx = stride, mm7 = 0 (for byte->word unpacking), mm6 = accumulator
; %1 = byte offset into the row (0 or 8)
; %2 = non-zero to advance eax/edx/ebx by one stride after the chunk
;
; Clobbers mm0-mm5; the pointer adds are interleaved with the unpacks
; to hide latency, not for correctness.
;---------------------------------------------------------------------------
%macro SADBI_16x16_MMX 2
   movq mm0, [edx+%1]
   movq mm2, [ebx+%1]
   movq mm1, mm0
   movq mm3, mm2

%if %2 != 0
   add edx, ecx
%endif

   punpcklbw mm0, mm7           ; mm01 = ref1 (8 words)
   punpckhbw mm1, mm7
   punpcklbw mm2, mm7           ; mm23 = ref2 (8 words)
   punpckhbw mm3, mm7

%if %2 != 0
   add ebx, ecx
%endif

   paddusw mm0, mm2             ; mm01 = ref1 + ref2
   paddusw mm1, mm3
   paddusw mm0, [mmx_one]       ; mm01 += 1 (round-to-nearest average)
   paddusw mm1, [mmx_one]
   psrlw mm0, 1                 ; mm01 >>= 1
   psrlw mm1, 1

   movq mm2, [eax+%1]
   movq mm3, mm2
   punpcklbw mm2, mm7           ; mm23 = src (8 words)
   punpckhbw mm3, mm7

%if %2 != 0
   add eax, ecx
%endif

   movq mm4, mm0
   movq mm5, mm1
   psubusw mm0, mm2             ; saturating subtraction both ways, then OR,
   psubusw mm1, mm3             ; yields the absolute difference per word
   psubusw mm2, mm4
   psubusw mm3, mm5
   por mm0, mm2                 ; mm01 = ABS(mm01 - mm23)
   por mm1, mm3

   paddusw mm6, mm0             ; mm6 += mm01
   paddusw mm6, mm1
%endmacro
; sad16bi_mmx: bi-directional SAD of a 16x16 block against the rounded
; average of two references.  cdecl; returns the SAD in eax.
; Fully unrolled: 16 rows x 2 chunks of 8 pixels.
align 16
sad16bi_mmx:
   push ebx
   mov eax, [esp+4+ 4]          ; Src
   mov edx, [esp+4+ 8]          ; Ref1
   mov ebx, [esp+4+12]          ; Ref2
   mov ecx, [esp+4+16]          ; Stride

   pxor mm6, mm6                ; accumulator
   pxor mm7, mm7                ; zero register for unpacking
.Loop:
   SADBI_16x16_MMX 0, 0         ; left half of row, keep pointers
   SADBI_16x16_MMX 8, 1         ; right half, advance to next row
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1

   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1
   SADBI_16x16_MMX 0, 0
   SADBI_16x16_MMX 8, 1

   pmaddwd mm6, [mmx_one]       ; collapse word sums to two dwords
   movq mm7, mm6
   psrlq mm7, 32
   paddd mm6, mm7               ; fold high dword into low

   movd eax, mm6                ; return value
   pop ebx
   ret
;===========================================================================
;
; uint32_t sad8bi_mmx(const uint8_t * const cur,
;                     const uint8_t * const ref1,
;                     const uint8_t * const ref2,
;                     const uint32_t stride);
;
;===========================================================================
; Bi-directional SAD of an 8x8 block; same scheme as sad16bi_mmx but one
; 8-pixel chunk per row, fully unrolled over 8 rows.  cdecl; result in eax.
align 16
sad8bi_mmx:
   push ebx
   mov eax, [esp+4+ 4]          ; Src
   mov edx, [esp+4+ 8]          ; Ref1
   mov ebx, [esp+4+12]          ; Ref2
   mov ecx, [esp+4+16]          ; Stride

   pxor mm6, mm6                ; accumulator
   pxor mm7, mm7                ; zero register for unpacking
.Loop:
   SADBI_16x16_MMX 0, 1         ; one row per call, pointers advanced each time
   SADBI_16x16_MMX 0, 1
   SADBI_16x16_MMX 0, 1
   SADBI_16x16_MMX 0, 1
   SADBI_16x16_MMX 0, 1
   SADBI_16x16_MMX 0, 1
   SADBI_16x16_MMX 0, 1
   SADBI_16x16_MMX 0, 1

   pmaddwd mm6, [mmx_one]       ; collapse word sums to two dwords
   movq mm7, mm6
   psrlq mm7, 32
   paddd mm6, mm7               ; fold high dword into low

   movd eax, mm6                ; return value
   pop ebx
   ret
;===========================================================================
;
; uint32_t dev16_mmx(const uint8_t * const cur,
;                    const uint32_t stride);
;
;===========================================================================
%macro MEAN_16x16_MMX 0 |
369 |
movq mm0, [eax] |
movq mm0, [eax] |
370 |
movq mm2, [eax+8] |
movq mm2, [eax+8] |
371 |
lea eax,[eax+ecx] |
lea eax,[eax+ecx] |
486 |
|
|
487 |
movd eax, mm6 |
movd eax, mm6 |
488 |
ret |
ret |
489 |
|
|