;/*****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - mmx 8x8 block-based halfpel interpolation -
; *
; *  Copyright(C) 2001 Peter Ross <pross@xvid.org>
; *               2002 Michael Militzer <isibaar@xvid.org>
; *
; *  This program is free software; you can redistribute it and/or modify
; *  it under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; *  Under section 8 of the GNU General Public License, the copyright
; *  holders of XVID explicitly forbid distribution in the following
; *  countries:
; *
; *    - Japan
; *    - United States of America
; *
; *  Linking XviD statically or dynamically with other modules is making a
; *  combined work based on XviD.  Thus, the terms and conditions of the
; *  GNU General Public License cover the whole combination.
; *
; *  As a special exception, the copyright holders of XviD give you
; *  permission to link XviD with independent modules that communicate with
; *  XviD solely through the VFW1.1 and DShow interfaces, regardless of the
; *  license terms of these independent modules, and to copy and distribute
; *  the resulting combined work under terms of your choice, provided that
; *  every copy of the combined work is accompanied by a complete copy of
; *  the source code of XviD (the version of XviD used to produce the
; *  combined work), being distributed under the terms of the GNU General
; *  Public License plus this exception.  An independent module is a module
; *  which is not derived from or based on XviD.
; *
; *  Note that people who make modified versions of XviD are not obligated
; *  to grant this special exception for their modified versions; it is
; *  their choice whether to do so.  The GNU General Public License gives
; *  permission to release a modified version without this exception; this
; *  exception also makes it possible to release a modified version which
; *  carries forward this exception.
; *
; * $Id: interpolate8x8_mmx.asm,v 1.11 2002-11-17 00:20:30 edgomez Exp $
; *
; ****************************************************************************/

BITS 32

%macro cglobal 1
  %ifdef PREFIX
    %ifdef MARK_FUNCS
      global _%1:function %1.endfunc-%1
      %define %1 _%1:function %1.endfunc-%1
      %define ENDFUNC .endfunc
    %else
      global _%1
      %define %1 _%1
      %define ENDFUNC
    %endif
  %else
    %ifdef MARK_FUNCS
      global %1:function %1.endfunc-%1
      %define ENDFUNC .endfunc
    %else
      global %1
      %define ENDFUNC
    %endif
  %endif
%endmacro
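
; A note on cglobal: the macro declares an exported symbol, prepending the
; leading underscore that PREFIX platforms expect for C-callable names, and,
; when MARK_FUNCS is defined, it also marks the symbol as an ELF function
; whose size runs to the matching .endfunc label -- the label each function
; emits through the ENDFUNC define -- so object tools can size it correctly.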

;=============================================================================
; Read only data
;=============================================================================

%ifdef FORMAT_COFF
SECTION .rodata
%else
SECTION .rodata align=16
%endif

;-----------------------------------------------------------------------------
; (16 - r) rounding table
;-----------------------------------------------------------------------------

ALIGN 16
rounding_lowpass_mmx:
  times 4 dw 16
  times 4 dw 15

;-----------------------------------------------------------------------------
; (1 - r) rounding table
;-----------------------------------------------------------------------------

rounding1_mmx:
  times 4 dw 1
  times 4 dw 0

;-----------------------------------------------------------------------------
; (2 - r) rounding table
;-----------------------------------------------------------------------------

rounding2_mmx:
  times 4 dw 2
  times 4 dw 1

mmx_one:
  times 8 db 1

mmx_two:
  times 8 db 2

mmx_three:
  times 8 db 3

mmx_five:
  times 4 dw 5

mmx_mask:
  times 8 db 254

mmx_mask2:
  times 8 db 252

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal interpolate8x8_halfpel_h_mmx
cglobal interpolate8x8_halfpel_v_mmx
cglobal interpolate8x8_halfpel_hv_mmx

cglobal interpolate8x4_halfpel_h_mmx
cglobal interpolate8x4_halfpel_v_mmx
cglobal interpolate8x4_halfpel_hv_mmx

cglobal interpolate8x8_avg4_mmx
cglobal interpolate8x8_avg2_mmx

cglobal interpolate8x8_6tap_lowpass_h_mmx
cglobal interpolate8x8_6tap_lowpass_v_mmx

cglobal interpolate8x8_halfpel_add_mmx
cglobal interpolate8x8_halfpel_h_add_mmx
cglobal interpolate8x8_halfpel_v_add_mmx
cglobal interpolate8x8_halfpel_hv_add_mmx

%macro CALC_AVG 6
  punpcklbw %3, %6
  punpckhbw %4, %6

  paddusw %1, %3                ; mm01 += mm23
  paddusw %2, %4
  paddusw %1, %5                ; mm01 += rounding
  paddusw %2, %5

  psrlw %1, 1                   ; mm01 >>= 1
  psrlw %2, 1
%endmacro
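
; CALC_AVG completes a halfpel average on word-unpacked pixels: %3/%4 are
; widened against the zero register %6 and accumulated onto %1/%2, then the
; rounding constant %5 (a rounding1_mmx/rounding2_mmx entry selected by the
; rounding argument) is added and both halves are halved.  Through the
; COPY_* macros below this computes
;   dst[i] = (src[i] + src[i+d] + 1 - rounding) >> 1
; with d = 1 for the horizontal case and d = stride for the vertical one.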

;-----------------------------------------------------------------------------
;
; void interpolate8x8_halfpel_h_mmx(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;-----------------------------------------------------------------------------

%macro COPY_H_MMX 0
  movq mm0, [esi]
  movq mm2, [esi + 1]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm6            ; unpack low bytes of src to words
  punpckhbw mm1, mm6            ; unpack high bytes of src to words

  CALC_AVG mm0, mm1, mm2, mm3, mm7, mm6

  packuswb mm0, mm1
  movq [edi], mm0               ; [dst] = average

  add esi, edx                  ; src += stride
  add edi, edx                  ; dst += stride
%endmacro

ALIGN 16
interpolate8x8_halfpel_h_mmx:

  push esi
  push edi

  mov eax, [esp + 8 + 16]       ; rounding

  movq mm7, [rounding1_mmx + eax * 8]

  mov edi, [esp + 8 + 4]        ; dst
  mov esi, [esp + 8 + 8]        ; src
  mov edx, [esp + 8 + 12]       ; stride

  pxor mm6, mm6                 ; zero

  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX

  pop edi
  pop esi

  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void interpolate8x8_halfpel_v_mmx(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;-----------------------------------------------------------------------------

%macro COPY_V_MMX 0
  movq mm0, [esi]
  movq mm2, [esi + edx]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm6            ; unpack low bytes of src to words
  punpckhbw mm1, mm6            ; unpack high bytes of src to words

  CALC_AVG mm0, mm1, mm2, mm3, mm7, mm6

  packuswb mm0, mm1
  movq [edi], mm0               ; [dst] = average

  add esi, edx                  ; src += stride
  add edi, edx                  ; dst += stride
%endmacro

ALIGN 16
interpolate8x8_halfpel_v_mmx:

  push esi
  push edi

  mov eax, [esp + 8 + 16]       ; rounding

  movq mm7, [rounding1_mmx + eax * 8]

  mov edi, [esp + 8 + 4]        ; dst
  mov esi, [esp + 8 + 8]        ; src
  mov edx, [esp + 8 + 12]       ; stride

  pxor mm6, mm6                 ; zero

  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX

  pop edi
  pop esi

  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void interpolate8x8_halfpel_hv_mmx(uint8_t * const dst,
;                                    const uint8_t * const src,
;                                    const uint32_t stride,
;                                    const uint32_t rounding);
;
;
;-----------------------------------------------------------------------------

%macro COPY_HV_MMX 0
    ; current row
  movq mm0, [esi]
  movq mm2, [esi + 1]

  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm6            ; unpack to words
  punpcklbw mm2, mm6
  punpckhbw mm1, mm6
  punpckhbw mm3, mm6

  paddusw mm0, mm2              ; mm01 = [src] + [src + 1]
  paddusw mm1, mm3

    ; next row
  movq mm4, [esi + edx]
  movq mm2, [esi + edx + 1]

  movq mm5, mm4
  movq mm3, mm2

  punpcklbw mm4, mm6
  punpcklbw mm2, mm6
  punpckhbw mm5, mm6
  punpckhbw mm3, mm6

  paddusw mm4, mm2              ; mm45 = [src + stride] + [src + stride + 1]
  paddusw mm5, mm3

    ; add current + next row
  paddusw mm0, mm4              ; mm01 += mm45
  paddusw mm1, mm5
  paddusw mm0, mm7              ; mm01 += rounding2
  paddusw mm1, mm7

  psrlw mm0, 2                  ; mm01 >>= 2
  psrlw mm1, 2

  packuswb mm0, mm1
  movq [edi], mm0               ; [dst] = (a + b + c + d + 2 - rounding) / 4

  add esi, edx                  ; src += stride
  add edi, edx                  ; dst += stride
%endmacro

ALIGN 16
interpolate8x8_halfpel_hv_mmx:

  push esi
  push edi

  mov eax, [esp + 8 + 16]       ; rounding

  movq mm7, [rounding2_mmx + eax * 8]

  mov edi, [esp + 8 + 4]        ; dst
  mov esi, [esp + 8 + 8]        ; src

  mov eax, 8

  pxor mm6, mm6                 ; zero

  mov edx, [esp + 8 + 12]       ; stride

  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX

  pop edi
  pop esi

  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void interpolate8x4_halfpel_h_mmx(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;-----------------------------------------------------------------------------

ALIGN 16
interpolate8x4_halfpel_h_mmx:

  push esi
  push edi
  mov eax, [esp + 8 + 16]       ; rounding

  movq mm7, [rounding1_mmx + eax * 8]

  mov edi, [esp + 8 + 4]        ; dst
  mov esi, [esp + 8 + 8]        ; src
  mov edx, [esp + 8 + 12]       ; stride

  pxor mm6, mm6                 ; zero

  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX

  pop edi
  pop esi

  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void interpolate8x4_halfpel_v_mmx(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;-----------------------------------------------------------------------------

ALIGN 16
interpolate8x4_halfpel_v_mmx:

  push esi
  push edi

  mov eax, [esp + 8 + 16]       ; rounding

  movq mm7, [rounding1_mmx + eax * 8]

  mov edi, [esp + 8 + 4]        ; dst
  mov esi, [esp + 8 + 8]        ; src
  mov edx, [esp + 8 + 12]       ; stride

  pxor mm6, mm6                 ; zero

  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX

  pop edi
  pop esi

  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void interpolate8x4_halfpel_hv_mmx(uint8_t * const dst,
;                                    const uint8_t * const src,
;                                    const uint32_t stride,
;                                    const uint32_t rounding);
;
;
;-----------------------------------------------------------------------------

ALIGN 16
interpolate8x4_halfpel_hv_mmx:

  push esi
  push edi

  mov eax, [esp + 8 + 16]       ; rounding

  movq mm7, [rounding2_mmx + eax * 8]

  mov edi, [esp + 8 + 4]        ; dst
  mov esi, [esp + 8 + 8]        ; src

  mov eax, 8

  pxor mm6, mm6                 ; zero

  mov edx, [esp + 8 + 12]       ; stride

  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX

  pop edi
  pop esi

  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void interpolate8x8_avg2_mmx(uint8_t const *dst,
;                              const uint8_t * const src1,
;                              const uint8_t * const src2,
;                              const uint32_t stride,
;                              const uint32_t rounding,
;                              const uint32_t height);
;
;-----------------------------------------------------------------------------

%macro AVG2_MMX_RND0 0
  movq mm0, [eax]               ; src1 -> mm0
  movq mm1, [ebx]               ; src2 -> mm1

  movq mm4, [eax+edx]
  movq mm5, [ebx+edx]

  movq mm2, mm0                 ; src1 -> mm2
  movq mm3, mm1                 ; src2 -> mm3

  pand mm2, mm7                 ; isolate the lsb
  pand mm3, mm7                 ; isolate the lsb

  por mm2, mm3                  ; ODD(src1) OR ODD(src2) -> mm2

  movq mm3, mm4
  movq mm6, mm5

  pand mm3, mm7
  pand mm6, mm7

  por mm3, mm6

  pand mm0, [mmx_mask]
  pand mm1, [mmx_mask]
  pand mm4, [mmx_mask]
  pand mm5, [mmx_mask]

  psrlq mm0, 1                  ; src1 / 2
  psrlq mm1, 1                  ; src2 / 2

  psrlq mm4, 1
  psrlq mm5, 1

  paddb mm0, mm1                ; src1/2 + src2/2 -> mm0
  paddb mm0, mm2                ; correct rounding error

  paddb mm4, mm5
  paddb mm4, mm3

  lea eax, [eax+2*edx]
  lea ebx, [ebx+2*edx]

  movq [ecx], mm0               ; (src1 + src2 + 1) / 2 -> dst
  movq [ecx+edx], mm4
%endmacro
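
; With no pavgb available in plain MMX, the byte-wise average above is built
; from shifts plus a saved carry bit:
;   (a + b + 1) >> 1 == (a & ~1)/2 + (b & ~1)/2 + ((a | b) & 1)
; e.g. a=1, b=2: 0 + 1 + 1 == (1+2+1)>>1.  mmx_mask holds the ~1 byte mask
; (254) and mm7 (mmx_one) isolates the lsb's.  AVG2_MMX_RND1 below replaces
; the final OR with an AND, which yields (a + b) >> 1 for the rounding==1
; case.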

%macro AVG2_MMX_RND1 0
  movq mm0, [eax]               ; src1 -> mm0
  movq mm1, [ebx]               ; src2 -> mm1

  movq mm4, [eax+edx]
  movq mm5, [ebx+edx]

  movq mm2, mm0                 ; src1 -> mm2
  movq mm3, mm1                 ; src2 -> mm3

  pand mm2, mm7                 ; isolate the lsb
  pand mm3, mm7                 ; isolate the lsb

  pand mm2, mm3                 ; ODD(src1) AND ODD(src2) -> mm2

  movq mm3, mm4
  movq mm6, mm5

  pand mm3, mm7
  pand mm6, mm7

  pand mm3, mm6

  pand mm0, [mmx_mask]
  pand mm1, [mmx_mask]
  pand mm4, [mmx_mask]
  pand mm5, [mmx_mask]

  psrlq mm0, 1                  ; src1 / 2
  psrlq mm1, 1                  ; src2 / 2

  psrlq mm4, 1
  psrlq mm5, 1

  paddb mm0, mm1                ; src1/2 + src2/2 -> mm0
  paddb mm0, mm2                ; correct rounding error

  paddb mm4, mm5
  paddb mm4, mm3

  lea eax, [eax+2*edx]
  lea ebx, [ebx+2*edx]

  movq [ecx], mm0               ; (src1 + src2) / 2 -> dst
  movq [ecx+edx], mm4
%endmacro

ALIGN 16
interpolate8x8_avg2_mmx:

  push ebx

  mov eax, [esp + 4 + 20]       ; rounding
  test eax, eax

  jnz near .rounding1

  mov eax, [esp + 4 + 24]       ; height -> eax
  sub eax, 8
  test eax, eax

  mov ecx, [esp + 4 + 4]        ; dst -> ecx
  mov eax, [esp + 4 + 8]        ; src1 -> eax
  mov ebx, [esp + 4 + 12]       ; src2 -> ebx
  mov edx, [esp + 4 + 16]       ; stride -> edx

  movq mm7, [mmx_one]

  jz near .start0

  AVG2_MMX_RND0
  lea ecx, [ecx+2*edx]

.start0:

  AVG2_MMX_RND0
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND0
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND0
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND0

  pop ebx
  ret

.rounding1:
  mov eax, [esp + 4 + 24]       ; height -> eax
  sub eax, 8
  test eax, eax

  mov ecx, [esp + 4 + 4]        ; dst -> ecx
  mov eax, [esp + 4 + 8]        ; src1 -> eax
  mov ebx, [esp + 4 + 12]       ; src2 -> ebx
  mov edx, [esp + 4 + 16]       ; stride -> edx

  movq mm7, [mmx_one]

  jz near .start1

  AVG2_MMX_RND1
  lea ecx, [ecx+2*edx]

.start1:

  AVG2_MMX_RND1
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND1
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND1
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND1

  pop ebx
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void interpolate8x8_avg4_mmx(uint8_t const *dst,
;                              const uint8_t * const src1,
;                              const uint8_t * const src2,
;                              const uint8_t * const src3,
;                              const uint8_t * const src4,
;                              const uint32_t stride,
;                              const uint32_t rounding);
;
;-----------------------------------------------------------------------------

%macro AVG4_MMX_RND0 0
  movq mm0, [eax]               ; src1 -> mm0
  movq mm1, [ebx]               ; src2 -> mm1

  movq mm2, mm0
  movq mm3, mm1

  pand mm2, [mmx_three]
  pand mm3, [mmx_three]

  pand mm0, [mmx_mask2]
  pand mm1, [mmx_mask2]

  psrlq mm0, 2
  psrlq mm1, 2

  lea eax, [eax+edx]
  lea ebx, [ebx+edx]

  paddb mm0, mm1
  paddb mm2, mm3

  movq mm4, [esi]               ; src3 -> mm4
  movq mm5, [edi]               ; src4 -> mm5

  movq mm1, mm4
  movq mm3, mm5

  pand mm1, [mmx_three]
  pand mm3, [mmx_three]

  pand mm4, [mmx_mask2]
  pand mm5, [mmx_mask2]

  psrlq mm4, 2
  psrlq mm5, 2

  paddb mm4, mm5
  paddb mm0, mm4

  paddb mm1, mm3
  paddb mm2, mm1

  paddb mm2, [mmx_two]
  pand mm2, [mmx_mask2]

  psrlq mm2, 2
  paddb mm0, mm2

  lea esi, [esi+edx]
  lea edi, [edi+edx]

  movq [ecx], mm0               ; (src1 + src2 + src3 + src4 + 2) / 4 -> dst
%endmacro
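
; The 4-way average splits every byte into its high six bits (mmx_mask2,
; i.e. ~3) and its low two bits (mmx_three): the high parts are pre-shifted
; so four of them can be summed byte-wise without overflow, while the four
; low parts plus the rounder are summed, masked and shifted down once, then
; folded back in.  The result is (src1+src2+src3+src4+2)/4 here;
; AVG4_MMX_RND1 below uses mmx_one as the rounder for the +1 variant.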

%macro AVG4_MMX_RND1 0
  movq mm0, [eax]               ; src1 -> mm0
  movq mm1, [ebx]               ; src2 -> mm1

  movq mm2, mm0
  movq mm3, mm1

  pand mm2, [mmx_three]
  pand mm3, [mmx_three]

  pand mm0, [mmx_mask2]
  pand mm1, [mmx_mask2]

  psrlq mm0, 2
  psrlq mm1, 2

  lea eax, [eax+edx]
  lea ebx, [ebx+edx]

  paddb mm0, mm1
  paddb mm2, mm3

  movq mm4, [esi]               ; src3 -> mm4
  movq mm5, [edi]               ; src4 -> mm5

  movq mm1, mm4
  movq mm3, mm5

  pand mm1, [mmx_three]
  pand mm3, [mmx_three]

  pand mm4, [mmx_mask2]
  pand mm5, [mmx_mask2]

  psrlq mm4, 2
  psrlq mm5, 2

  paddb mm4, mm5
  paddb mm0, mm4

  paddb mm1, mm3
  paddb mm2, mm1

  paddb mm2, [mmx_one]
  pand mm2, [mmx_mask2]

  psrlq mm2, 2
  paddb mm0, mm2

  lea esi, [esi+edx]
  lea edi, [edi+edx]

  movq [ecx], mm0               ; (src1 + src2 + src3 + src4 + 1) / 4 -> dst
%endmacro

ALIGN 16
interpolate8x8_avg4_mmx:

  push ebx
  push edi
  push esi

  mov eax, [esp + 12 + 28]      ; rounding

  test eax, eax

  mov ecx, [esp + 12 + 4]       ; dst -> ecx
  mov eax, [esp + 12 + 8]       ; src1 -> eax
  mov ebx, [esp + 12 + 12]      ; src2 -> ebx
  mov esi, [esp + 12 + 16]      ; src3 -> esi
  mov edi, [esp + 12 + 20]      ; src4 -> edi
  mov edx, [esp + 12 + 24]      ; stride -> edx

  movq mm7, [mmx_one]

  jnz near .rounding1

  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0

  pop esi
  pop edi
  pop ebx
  ret

.rounding1:
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1

  pop esi
  pop edi
  pop ebx
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void interpolate8x8_6tap_lowpass_h_mmx(uint8_t const *dst,
;                                        const uint8_t * const src,
;                                        const uint32_t stride,
;                                        const uint32_t rounding);
;
;-----------------------------------------------------------------------------

%macro LOWPASS_6TAP_H_MMX 0
  movq mm0, [eax]
  movq mm2, [eax+1]

  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7

  punpckhbw mm1, mm7
  punpckhbw mm3, mm7

  paddw mm0, mm2
  paddw mm1, mm3

  psllw mm0, 2
  psllw mm1, 2

  movq mm2, [eax-1]
  movq mm4, [eax+2]

  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm7
  punpcklbw mm4, mm7

  punpckhbw mm3, mm7
  punpckhbw mm5, mm7

  paddw mm2, mm4
  paddw mm3, mm5

  psubsw mm0, mm2
  psubsw mm1, mm3

  pmullw mm0, [mmx_five]
  pmullw mm1, [mmx_five]

  movq mm2, [eax-2]
  movq mm4, [eax+3]

  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm7
  punpcklbw mm4, mm7

  punpckhbw mm3, mm7
  punpckhbw mm5, mm7

  paddw mm2, mm4
  paddw mm3, mm5

  paddsw mm0, mm2
  paddsw mm1, mm3

  paddsw mm0, mm6
  paddsw mm1, mm6

  psraw mm0, 5
  psraw mm1, 5

  lea eax, [eax+edx]
  packuswb mm0, mm1
  movq [ecx], mm0
%endmacro
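
; Tap derivation: with s[i] the pixels around the output position, the macro
; computes 5*(4*(s[0]+s[1]) - (s[-1]+s[2])) + (s[-2]+s[3]), i.e. the 6-tap
; kernel (1, -5, 20, 20, -5, 1), then adds the (16 - rounding) constant from
; rounding_lowpass_mmx and shifts the sum right by 5.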

ALIGN 16
interpolate8x8_6tap_lowpass_h_mmx:

  mov eax, [esp + 16]           ; rounding

  movq mm6, [rounding_lowpass_mmx + eax * 8]

  mov ecx, [esp + 4]            ; dst -> ecx
  mov eax, [esp + 8]            ; src -> eax
  mov edx, [esp + 12]           ; stride -> edx

  pxor mm7, mm7

  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX

  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void interpolate8x8_6tap_lowpass_v_mmx(uint8_t const *dst,
;                                        const uint8_t * const src,
;                                        const uint32_t stride,
;                                        const uint32_t rounding);
;
;-----------------------------------------------------------------------------

%macro LOWPASS_6TAP_V_MMX 0
  movq mm0, [eax]
  movq mm2, [eax+edx]

  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7

  punpckhbw mm1, mm7
  punpckhbw mm3, mm7

  paddw mm0, mm2
  paddw mm1, mm3

  psllw mm0, 2
  psllw mm1, 2

  movq mm4, [eax+2*edx]
  sub eax, ebx
  movq mm2, [eax+2*edx]

  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm7
  punpcklbw mm4, mm7

  punpckhbw mm3, mm7
  punpckhbw mm5, mm7

  paddw mm2, mm4
  paddw mm3, mm5

  psubsw mm0, mm2
  psubsw mm1, mm3

  pmullw mm0, [mmx_five]
  pmullw mm1, [mmx_five]

  movq mm2, [eax+edx]
  movq mm4, [eax+2*ebx]

  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm7
  punpcklbw mm4, mm7

  punpckhbw mm3, mm7
  punpckhbw mm5, mm7

  paddw mm2, mm4
  paddw mm3, mm5

  paddsw mm0, mm2
  paddsw mm1, mm3

  paddsw mm0, mm6
  paddsw mm1, mm6

  psraw mm0, 5
  psraw mm1, 5

  lea eax, [eax+4*edx]
  packuswb mm0, mm1
  movq [ecx], mm0
%endmacro
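
; Same (1, -5, 20, 20, -5, 1) kernel applied down a column: the caller keeps
; 3*stride in ebx so the source pointer can be stepped back three rows,
; letting rows -2 .. +3 be reached from a single base register before the
; pointer advances by 4*stride for the next output row.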

ALIGN 16
interpolate8x8_6tap_lowpass_v_mmx:

  push ebx

  mov eax, [esp + 4 + 16]       ; rounding

  movq mm6, [rounding_lowpass_mmx + eax * 8]

  mov ecx, [esp + 4 + 4]        ; dst -> ecx
  mov eax, [esp + 4 + 8]        ; src -> eax
  mov edx, [esp + 4 + 12]       ; stride -> edx

  mov ebx, edx
  shl ebx, 1
  add ebx, edx                  ; ebx = 3 * stride

  pxor mm7, mm7

  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX

  pop ebx
  ret
ENDFUNC

;===========================================================================
;
; The next functions combine both source halfpel interpolation step and the
; averaging (with rounding) step to avoid wasting memory bandwidth computing
; intermediate halfpel images and then averaging them.
;
;===========================================================================

%macro PROLOG0 0
  mov ecx, [esp+ 4]             ; Dst
  mov eax, [esp+ 8]             ; Src
  mov edx, [esp+12]             ; BpS
%endmacro

%macro PROLOG 2                 ; %1: Rounder, %2: load Dst-Rounder
  pxor mm6, mm6
  movq mm7, [%1]                ; TODO: dangerous! (eax isn't checked)
%if %2
  movq mm5, [rounding1_mmx]
%endif

  PROLOG0
%endmacro

; performs: mm0 == (mm0+mm2), mm1 == (mm1+mm3)
%macro MIX 0
  punpcklbw mm0, mm6
  punpcklbw mm2, mm6
  punpckhbw mm1, mm6
  punpckhbw mm3, mm6
  paddusw mm0, mm2
  paddusw mm1, mm3
%endmacro

%macro MIX_DST 0
  movq mm3, mm2
  paddusw mm0, mm7              ; rounder
  paddusw mm1, mm7              ; rounder
  punpcklbw mm2, mm6
  punpckhbw mm3, mm6
  psrlw mm0, 1
  psrlw mm1, 1

  paddusw mm0, mm2              ; mix Src(mm0/mm1) with Dst(mm2/mm3)
  paddusw mm1, mm3
  paddusw mm0, mm5
  paddusw mm1, mm5
  psrlw mm0, 1
  psrlw mm1, 1

  packuswb mm0, mm1
%endmacro

%macro MIX2 0
  punpcklbw mm0, mm6
  punpcklbw mm2, mm6
  paddusw mm0, mm2
  paddusw mm0, mm7
  punpckhbw mm1, mm6
  punpckhbw mm3, mm6
  paddusw mm1, mm7
  paddusw mm1, mm3
  psrlw mm0, 1
  psrlw mm1, 1

  packuswb mm0, mm1
%endmacro
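
; MIX adds the two word-unpacked source halves (the halfpel neighbours);
; MIX_DST then applies the halfpel rounder kept in mm7, halves the sum,
; averages it against the destination using the rounding1_mmx constant held
; in mm5, and repacks to bytes -- in effect dst = (dst + interp_src + 1) / 2.
; MIX2 performs the same source add with the rounder folded in during the
; unpacking, for cases where mm5 cannot hold the destination rounder.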

;===========================================================================
;
; void interpolate8x8_halfpel_add_mmx(uint8_t * const dst,
;                                     const uint8_t * const src,
;                                     const uint32_t stride,
;                                     const uint32_t rounding);
;
;
;===========================================================================

%macro ADD_FF_MMX 1
  movq mm0, [eax]
  movq mm2, [ecx]
  movq mm1, mm0
  movq mm3, mm2
%if (%1!=0)
  lea eax, [eax+%1*edx]
%endif
  MIX
  paddusw mm0, mm5              ; rounder
  paddusw mm1, mm5              ; rounder
  psrlw mm0, 1
  psrlw mm1, 1

  packuswb mm0, mm1
  movq [ecx], mm0
%if (%1!=0)
  lea ecx, [ecx+%1*edx]
%endif
%endmacro

ALIGN 16
interpolate8x8_halfpel_add_mmx:
  PROLOG rounding1_mmx, 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 0
  ret
ENDFUNC

;===========================================================================
;
; void interpolate8x8_halfpel_h_add_mmx(uint8_t * const dst,
;                                       const uint8_t * const src,
;                                       const uint32_t stride,
;                                       const uint32_t rounding);
;
;
;===========================================================================

%macro ADD_FH_MMX 0
  movq mm0, [eax]
  movq mm2, [eax+1]
  movq mm1, mm0
  movq mm3, mm2

  lea eax, [eax+edx]

  MIX
  movq mm2, [ecx]               ; prepare mix with Dst[0]
  MIX_DST
  movq [ecx], mm0
%endmacro

ALIGN 16
interpolate8x8_halfpel_h_add_mmx:
  PROLOG rounding1_mmx, 1

  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  ret
ENDFUNC

;===========================================================================
;
; void interpolate8x8_halfpel_v_add_mmx(uint8_t * const dst,
;                                       const uint8_t * const src,
;                                       const uint32_t stride,
;                                       const uint32_t rounding);
;
;
;===========================================================================

%macro ADD_HF_MMX 0
  movq mm0, [eax]
  movq mm2, [eax+edx]
  movq mm1, mm0
  movq mm3, mm2

  lea eax, [eax+edx]

  MIX
  movq mm2, [ecx]               ; prepare mix with Dst[0]
  MIX_DST
  movq [ecx], mm0
%endmacro

ALIGN 16
interpolate8x8_halfpel_v_add_mmx:
  PROLOG rounding1_mmx, 1

  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  ret
ENDFUNC

; The trick is to correct the result of 'pavgb' with some combination of the
; lsb's of the 4 input values i,j,k,l, and their intermediate 'pavgb' (s and t).
; The boolean relations are:
;   (i+j+k+l+3)/4 = (s+t+1)/2 - (ij&kl)&st
;   (i+j+k+l+2)/4 = (s+t+1)/2 - (ij|kl)&st
;   (i+j+k+l+1)/4 = (s+t+1)/2 - (ij&kl)|st
;   (i+j+k+l+0)/4 = (s+t+1)/2 - (ij|kl)|st
; with s=(i+j+1)/2, t=(k+l+1)/2, ij = i^j, kl = k^l, st = s^t.

; Moreover, we process 2 lines at a time, for better overlapping (~15% faster).
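
; A quick check of the second relation, with all terms taken on the lsb's:
; i=0, j=1, k=0, l=0 gives s=(0+1+1)/2=1 and t=0, so (s+t+1)/2 = 1; and
; ij=1, kl=0, st=1, hence (ij|kl)&st = 1, so 1 - 1 = 0 = (0+1+0+0+2)/4.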

;===========================================================================
;
; void interpolate8x8_halfpel_hv_add_mmx(uint8_t * const dst,
;                                        const uint8_t * const src,
;                                        const uint32_t stride,
;                                        const uint32_t rounding);
;
;
;===========================================================================

%macro ADD_HH_MMX 0
  lea eax, [eax+edx]

    ; transfer prev line to mm0/mm1
  movq mm0, mm2
  movq mm1, mm3

    ; load new line in mm2/mm3
  movq mm2, [eax]
  movq mm4, [eax+1]
  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm6
  punpcklbw mm4, mm6
  paddusw mm2, mm4
  punpckhbw mm3, mm6
  punpckhbw mm5, mm6
  paddusw mm3, mm5

    ; mix current line (mm2/mm3) with previous (mm0,mm1);
    ; we'll preserve mm2/mm3 for next line...

  paddusw mm0, mm2
  paddusw mm1, mm3

  movq mm4, [ecx]               ; prepare mix with Dst[0]
  movq mm5, mm4

  paddusw mm0, mm7              ; finish mixing current line
  paddusw mm1, mm7

  punpcklbw mm4, mm6
  punpckhbw mm5, mm6

  psrlw mm0, 2
  psrlw mm1, 2

  paddusw mm0, mm4              ; mix Src(mm0/mm1) with Dst(mm4/mm5)
  paddusw mm1, mm5

  paddusw mm0, [rounding1_mmx]
  paddusw mm1, [rounding1_mmx]

  psrlw mm0, 1
  psrlw mm1, 1

  packuswb mm0, mm1

  movq [ecx], mm0
%endmacro

ALIGN 16
interpolate8x8_halfpel_hv_add_mmx:
  PROLOG rounding2_mmx, 0       ; mm5 is busy. Don't load dst-rounder

    ; preprocess first line
  movq mm0, [eax]
  movq mm2, [eax+1]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm6
  punpcklbw mm2, mm6
  punpckhbw mm1, mm6
  punpckhbw mm3, mm6
  paddusw mm2, mm0
  paddusw mm3, mm1

    ; Input: mm2/mm3 contains the value (Src[0]+Src[1]) of previous line

  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX

  ret
ENDFUNC


%ifidn __OUTPUT_FORMAT__,elf
section ".note.GNU-stack" noalloc noexec nowrite progbits
%endif