; *
; * History:
; *
; * 05.10.2002 added some qpel mmx code - Isibaar
; * 06.07.2002 mmx cleanup - Isibaar
; * 22.12.2001 initial version; (c)2001 peter ross <pross@cs.rmit.edu.au>
; *
align 16

;===========================================================================
; (16 - r) rounding table
;===========================================================================

rounding_lowpass_mmx
times 4 dw 16
times 4 dw 15
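; Indexed by the rounding flag scaled to quadwords: the later
; "movq mm6, [rounding_lowpass_mmx + eax*8]" loads 4 x 16 when
; rounding == 0 and 4 x 15 when rounding == 1, i.e. 16 - rounding
; in every word lane.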

;===========================================================================
; (1 - r) rounding table
;===========================================================================

mmx_one
times 8 db 1

mmx_two
times 8 db 2

mmx_three
times 8 db 3

mmx_five
times 4 dw 5

mmx_mask
times 8 db 254                  ; 0xfe: clears each byte's lsb

mmx_mask2
times 8 db 252                  ; 0xfc: clears each byte's two lsbs

section .text

%macro CALC_AVG 6
  pop esi

  ret

;===========================================================================
;
; void interpolate8x8_avg2_mmx(uint8_t const *dst,
;                              const uint8_t * const src1,
;                              const uint8_t * const src2,
;                              const uint32_t stride,
;                              const uint32_t rounding);
;
;===========================================================================
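
;---------------------------------------------------------------------------
; How the AVG2 macros below average bytes without unpacking to words
; (a scalar sketch of the arithmetic, illustration only): paddb wraps on
; 8-bit overflow, so the average is built from halves plus a carry taken
; from the low bits:
;
;   rounding == 0:  dst[i] = (a >> 1) + (b >> 1) + ((a | b) & 1)  ; (a+b+1)/2
;   rounding == 1:  dst[i] = (a >> 1) + (b >> 1) + ((a & b) & 1)  ; (a+b)/2
;
; mm7 holds mmx_one to isolate the lsbs; masking with mmx_mask (0xfe)
; before psrlq keeps the 64-bit shift from dragging a neighbouring byte's
; lsb into the byte below.
;---------------------------------------------------------------------------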

%macro AVG2_MMX_RND0 0
  movq mm0, [eax]           ; src1 -> mm0
  movq mm1, [ebx]           ; src2 -> mm1

  movq mm4, [eax+edx]
  movq mm5, [ebx+edx]

  movq mm2, mm0             ; src1 -> mm2
  movq mm3, mm1             ; src2 -> mm3

  pand mm2, mm7             ; isolate the lsb
  pand mm3, mm7             ; isolate the lsb

  por mm2, mm3              ; ODD(src1) OR ODD(src2) -> mm2

  movq mm3, mm4
  movq mm6, mm5

  pand mm3, mm7
  pand mm6, mm7

  por mm3, mm6

  pand mm0, [mmx_mask]
  pand mm1, [mmx_mask]
  pand mm4, [mmx_mask]
  pand mm5, [mmx_mask]

  psrlq mm0, 1              ; src1 / 2
  psrlq mm1, 1              ; src2 / 2

  psrlq mm4, 1
  psrlq mm5, 1

  paddb mm0, mm1            ; src1/2 + src2/2 -> mm0
  paddb mm0, mm2            ; correct rounding error

  paddb mm4, mm5
  paddb mm4, mm3

  lea eax, [eax+2*edx]
  lea ebx, [ebx+2*edx]

  movq [ecx], mm0           ; (src1 + src2 + 1) / 2 -> dst
  movq [ecx+edx], mm4
%endmacro

%macro AVG2_MMX_RND1 0
  movq mm0, [eax]           ; src1 -> mm0
  movq mm1, [ebx]           ; src2 -> mm1

  movq mm4, [eax+edx]
  movq mm5, [ebx+edx]

  movq mm2, mm0             ; src1 -> mm2
  movq mm3, mm1             ; src2 -> mm3

  pand mm2, mm7             ; isolate the lsb
  pand mm3, mm7             ; isolate the lsb

  pand mm2, mm3             ; ODD(src1) AND ODD(src2) -> mm2

  movq mm3, mm4
  movq mm6, mm5

  pand mm3, mm7
  pand mm6, mm7

  pand mm3, mm6

  pand mm0, [mmx_mask]
  pand mm1, [mmx_mask]
  pand mm4, [mmx_mask]
  pand mm5, [mmx_mask]

  psrlq mm0, 1              ; src1 / 2
  psrlq mm1, 1              ; src2 / 2

  psrlq mm4, 1
  psrlq mm5, 1

  paddb mm0, mm1            ; src1/2 + src2/2 -> mm0
  paddb mm0, mm2            ; correct rounding error

  paddb mm4, mm5
  paddb mm4, mm3

  lea eax, [eax+2*edx]
  lea ebx, [ebx+2*edx]

  movq [ecx], mm0           ; (src1 + src2) / 2 -> dst
  movq [ecx+edx], mm4
%endmacro

align 16
cglobal interpolate8x8_avg2_mmx
interpolate8x8_avg2_mmx

  push ebx

  mov eax, [esp + 4 + 20]   ; rounding
  test eax, eax             ; mov does not touch eflags, so this result
                            ; survives the argument loads below

  mov ecx, [esp + 4 + 4]    ; dst -> ecx
  mov eax, [esp + 4 + 8]    ; src1 -> eax
  mov ebx, [esp + 4 + 12]   ; src2 -> ebx
  mov edx, [esp + 4 + 16]   ; stride -> edx

  movq mm7, [mmx_one]

  jnz near .rounding1

  ; rounding == 0: each pass writes two rows, four passes cover the block
  AVG2_MMX_RND0
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND0
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND0
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND0

  pop ebx
  ret

.rounding1
  AVG2_MMX_RND1
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND1
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND1
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND1

  pop ebx
  ret
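
;---------------------------------------------------------------------------
; Call sketch (illustration only): the [esp + 4 + n] offsets above assume
; the usual cdecl convention, arguments pushed right to left and the stack
; cleaned by the caller:
;
;   push rounding
;   push stride
;   push src2
;   push src1
;   push dst
;   call interpolate8x8_avg2_mmx
;   add esp, 5*4
;---------------------------------------------------------------------------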

;===========================================================================
;
; void interpolate8x8_avg4_mmx(uint8_t const *dst,
;                              const uint8_t * const src1,
;                              const uint8_t * const src2,
;                              const uint8_t * const src3,
;                              const uint8_t * const src4,
;                              const uint32_t stride,
;                              const uint32_t rounding);
;
;===========================================================================
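
;---------------------------------------------------------------------------
; The AVG4 macros keep two fractional bits per byte: each source is split
; into a high part (s >> 2, via mmx_mask2 = 0xfc) and a low part (s & 3,
; via mmx_three). A scalar sketch of the rounding == 0 case (illustration
; only):
;
;   hi = (s1 >> 2) + (s2 >> 2) + (s3 >> 2) + (s4 >> 2);
;   lo = (s1 & 3)  + (s2 & 3)  + (s3 & 3)  + (s4 & 3);
;   dst[i] = hi + ((lo + 2) >> 2);     ; == (s1 + s2 + s3 + s4 + 2) >> 2
;
; The rounding == 1 variant adds mmx_one instead of mmx_two. Worst case is
; hi = 4*63 = 252 plus a carry of at most 3, so the byte adds cannot wrap.
;---------------------------------------------------------------------------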

%macro AVG4_MMX_RND0 0
  movq mm0, [eax]           ; src1 -> mm0
  movq mm1, [ebx]           ; src2 -> mm1

  movq mm2, mm0
  movq mm3, mm1

  pand mm2, [mmx_three]
  pand mm3, [mmx_three]

  pand mm0, [mmx_mask2]
  pand mm1, [mmx_mask2]

  psrlq mm0, 2
  psrlq mm1, 2

  lea eax, [eax+edx]
  lea ebx, [ebx+edx]

  paddb mm0, mm1
  paddb mm2, mm3

  movq mm4, [esi]           ; src3 -> mm4
  movq mm5, [edi]           ; src4 -> mm5

  movq mm1, mm4
  movq mm3, mm5

  pand mm1, [mmx_three]
  pand mm3, [mmx_three]

  pand mm4, [mmx_mask2]
  pand mm5, [mmx_mask2]

  psrlq mm4, 2
  psrlq mm5, 2

  paddb mm4, mm5
  paddb mm0, mm4

  paddb mm1, mm3
  paddb mm2, mm1

  paddb mm2, [mmx_two]
  pand mm2, [mmx_mask2]

  psrlq mm2, 2
  paddb mm0, mm2

  lea esi, [esi+edx]
  lea edi, [edi+edx]

  movq [ecx], mm0           ; (src1 + src2 + src3 + src4 + 2) / 4 -> dst
%endmacro

%macro AVG4_MMX_RND1 0
  movq mm0, [eax]           ; src1 -> mm0
  movq mm1, [ebx]           ; src2 -> mm1

  movq mm2, mm0
  movq mm3, mm1

  pand mm2, [mmx_three]
  pand mm3, [mmx_three]

  pand mm0, [mmx_mask2]
  pand mm1, [mmx_mask2]

  psrlq mm0, 2
  psrlq mm1, 2

  lea eax, [eax+edx]
  lea ebx, [ebx+edx]

  paddb mm0, mm1
  paddb mm2, mm3

  movq mm4, [esi]           ; src3 -> mm4
  movq mm5, [edi]           ; src4 -> mm5

  movq mm1, mm4
  movq mm3, mm5

  pand mm1, [mmx_three]
  pand mm3, [mmx_three]

  pand mm4, [mmx_mask2]
  pand mm5, [mmx_mask2]

  psrlq mm4, 2
  psrlq mm5, 2

  paddb mm4, mm5
  paddb mm0, mm4

  paddb mm1, mm3
  paddb mm2, mm1

  paddb mm2, [mmx_one]
  pand mm2, [mmx_mask2]

  psrlq mm2, 2
  paddb mm0, mm2

  lea esi, [esi+edx]
  lea edi, [edi+edx]

  movq [ecx], mm0           ; (src1 + src2 + src3 + src4 + 1) / 4 -> dst
%endmacro

align 16
cglobal interpolate8x8_avg4_mmx
interpolate8x8_avg4_mmx

  push ebx
  push edi
  push esi

  mov eax, [esp + 12 + 28]  ; rounding
  test eax, eax

  mov ecx, [esp + 12 + 4]   ; dst -> ecx
  mov eax, [esp + 12 + 8]   ; src1 -> eax
  mov ebx, [esp + 12 + 12]  ; src2 -> ebx
  mov esi, [esp + 12 + 16]  ; src3 -> esi
  mov edi, [esp + 12 + 20]  ; src4 -> edi
  mov edx, [esp + 12 + 24]  ; stride -> edx

  movq mm7, [mmx_one]

  jnz near .rounding1

  ; rounding == 0: one row per pass, eight passes cover the block
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0

  pop esi
  pop edi
  pop ebx
  ret

.rounding1
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1

  pop esi
  pop edi
  pop ebx
  ret

;===========================================================================
;
; void interpolate8x8_6tap_lowpass_h_mmx(uint8_t const *dst,
;                                        const uint8_t * const src,
;                                        const uint32_t stride,
;                                        const uint32_t rounding);
;
;===========================================================================
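
;---------------------------------------------------------------------------
; 6-tap lowpass filter with coefficients (1 -5 20 20 -5 1), as used for
; quarter-pel interpolation. Worked per-pixel form, derived from the macro
; below:
;
;   dst[i] = clip255( (x[i-2] - 5*x[i-1] + 20*x[i] + 20*x[i+1]
;                      - 5*x[i+2] + x[i+3] + 16 - rounding) >> 5 )
;
; built as (4*(x[0]+x[1]) - (x[-1]+x[2])) * 5 + (x[-2]+x[3]), then biased
; by mm6 = 16 - rounding, shifted, and repacked with unsigned saturation.
;---------------------------------------------------------------------------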

%macro LOWPASS_6TAP_H_MMX 0
  movq mm0, [eax]           ; x[0]
  movq mm2, [eax+1]         ; x[1]

  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7        ; unpack bytes to words (mm7 = 0)
  punpcklbw mm2, mm7

  punpckhbw mm1, mm7
  punpckhbw mm3, mm7

  paddw mm0, mm2            ; x[0] + x[1]
  paddw mm1, mm3

  psllw mm0, 2              ; 4*(x[0] + x[1])
  psllw mm1, 2

  movq mm2, [eax-1]         ; x[-1]
  movq mm4, [eax+2]         ; x[2]

  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm7
  punpcklbw mm4, mm7

  punpckhbw mm3, mm7
  punpckhbw mm5, mm7

  paddw mm2, mm4            ; x[-1] + x[2]
  paddw mm3, mm5

  psubsw mm0, mm2           ; 4*(x[0]+x[1]) - (x[-1]+x[2])
  psubsw mm1, mm3

  pmullw mm0, [mmx_five]    ; 20*(x[0]+x[1]) - 5*(x[-1]+x[2])
  pmullw mm1, [mmx_five]

  movq mm2, [eax-2]         ; x[-2]
  movq mm4, [eax+3]         ; x[3]

  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm7
  punpcklbw mm4, mm7

  punpckhbw mm3, mm7
  punpckhbw mm5, mm7

  paddw mm2, mm4            ; x[-2] + x[3]
  paddw mm3, mm5

  paddsw mm0, mm2           ; full tap sum
  paddsw mm1, mm3

  paddsw mm0, mm6           ; + (16 - rounding)
  paddsw mm1, mm6

  psraw mm0, 5
  psraw mm1, 5

  lea eax, [eax+edx]        ; next source row
  packuswb mm0, mm1         ; clip to [0,255] and repack
  movq [ecx], mm0
%endmacro
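
; Note: each pass reads [eax-2] through [eax+3]+7, i.e. from 2 bytes left
; of the row to 3 bytes past its 8th pixel, so the caller must guarantee
; that margin is readable.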

align 16
cglobal interpolate8x8_6tap_lowpass_h_mmx
interpolate8x8_6tap_lowpass_h_mmx

  mov eax, [esp + 16]       ; rounding

  movq mm6, [rounding_lowpass_mmx + eax * 8]

  mov ecx, [esp + 4]        ; dst -> ecx
  mov eax, [esp + 8]        ; src -> eax
  mov edx, [esp + 12]       ; stride -> edx

  pxor mm7, mm7             ; zero, for byte -> word unpacking

  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX

  ret

;===========================================================================
;
; void interpolate8x8_6tap_lowpass_v_mmx(uint8_t const *dst,
;                                        const uint8_t * const src,
;                                        const uint32_t stride,
;                                        const uint32_t rounding);
;
;===========================================================================
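
;---------------------------------------------------------------------------
; Vertical variant of the same (1 -5 20 20 -5 1) filter, with taps taken
; from six consecutive rows rather than neighbouring columns. ebx is
; preloaded with 3*stride: after "sub eax, ebx" the macro can reach rows
; -2 and -1, and the closing "lea eax, [eax+4*edx]" nets an advance of
; exactly one row per pass.
;---------------------------------------------------------------------------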

%macro LOWPASS_6TAP_V_MMX 0
  movq mm0, [eax]           ; row 0
  movq mm2, [eax+edx]       ; row 1

  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7        ; unpack bytes to words (mm7 = 0)
  punpcklbw mm2, mm7

  punpckhbw mm1, mm7
  punpckhbw mm3, mm7

  paddw mm0, mm2            ; row0 + row1
  paddw mm1, mm3

  psllw mm0, 2              ; 4*(row0 + row1)
  psllw mm1, 2

  movq mm4, [eax+2*edx]     ; row 2
  sub eax, ebx              ; step back 3 rows (ebx = 3*stride)
  movq mm2, [eax+2*edx]     ; row -1

  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm7
  punpcklbw mm4, mm7

  punpckhbw mm3, mm7
  punpckhbw mm5, mm7

  paddw mm2, mm4            ; row -1 + row 2
  paddw mm3, mm5

  psubsw mm0, mm2           ; 4*(row0+row1) - (row-1 + row2)
  psubsw mm1, mm3

  pmullw mm0, [mmx_five]    ; 20*(row0+row1) - 5*(row-1 + row2)
  pmullw mm1, [mmx_five]

  movq mm2, [eax+edx]       ; row -2
  movq mm4, [eax+2*ebx]     ; row 3

  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm7
  punpcklbw mm4, mm7

  punpckhbw mm3, mm7
  punpckhbw mm5, mm7

  paddw mm2, mm4            ; row -2 + row 3
  paddw mm3, mm5

  paddsw mm0, mm2           ; full tap sum
  paddsw mm1, mm3

  paddsw mm0, mm6           ; + (16 - rounding)
  paddsw mm1, mm6

  psraw mm0, 5
  psraw mm1, 5

  lea eax, [eax+4*edx]      ; net advance: one row
  packuswb mm0, mm1         ; clip to [0,255] and repack
  movq [ecx], mm0
%endmacro
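
; Note: each pass reads six rows, from 2*stride above the current source
; pointer to 3*stride below it; the caller is expected to provide a src
; window for which all of those rows are valid.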

align 16
cglobal interpolate8x8_6tap_lowpass_v_mmx
interpolate8x8_6tap_lowpass_v_mmx

  push ebx

  mov eax, [esp + 4 + 16]   ; rounding

  movq mm6, [rounding_lowpass_mmx + eax * 8]

  mov ecx, [esp + 4 + 4]    ; dst -> ecx
  mov eax, [esp + 4 + 8]    ; src -> eax
  mov edx, [esp + 4 + 12]   ; stride -> edx

  mov ebx, edx
  shl ebx, 1
  add ebx, edx              ; ebx = 3*stride

  pxor mm7, mm7             ; zero, for byte -> word unpacking

  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX

  pop ebx
  ret