;/*****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  mmx optimized MPEG quantization/dequantization
; *
; *  Copyright(C) 2002 Peter Ross <pross@xvid.org>
; *  Copyright(C) 2002 Michael Militzer <michael@xvid.org>
; *  Copyright(C) 2002 Pascal Massimino <skal@planet-d.net>
; *
; *  This file is part of XviD, a free MPEG-4 video encoder/decoder
; *
; *  XviD is free software; you can redistribute it and/or modify it
; *  under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; *  Under section 8 of the GNU General Public License, the copyright
; *  holders of XVID explicitly forbid distribution in the following
; *  countries:
; *
; *    - Japan
; *    - United States of America
; *
; *  Linking XviD statically or dynamically with other modules is making a
; *  combined work based on XviD. Thus, the terms and conditions of the
; *  GNU General Public License cover the whole combination.
; *
; *  As a special exception, the copyright holders of XviD give you
; *  permission to link XviD with independent modules that communicate with
; *  XviD solely through the VFW1.1 and DShow interfaces, regardless of the
; *  license terms of these independent modules, and to copy and distribute
; *  the resulting combined work under terms of your choice, provided that
; *  every copy of the combined work is accompanied by a complete copy of
; *  the source code of XviD (the version of XviD used to produce the
; *  combined work), being distributed under the terms of the GNU General
; *  Public License plus this exception. An independent module is a module
; *  which is not derived from or based on XviD.
; *
; *  Note that people who make modified versions of XviD are not obligated
; *  to grant this special exception for their modified versions; it is
; *  their choice whether to do so. The GNU General Public License gives
; *  permission to release a modified version without this exception; this
; *  exception also makes it possible to release a modified version which
; *  carries forward this exception.
; *
; *  $Id: quantize4_mmx.asm,v 1.7 2002-11-17 00:41:20 edgomez Exp $
; *
; *************************************************************************/

; data/text alignment
%define ALIGN 8
;===========================================================================

align 16

mmx_32767_minus_2047 times 4 dw (32767-2047)
mmx_32768_minus_2048 times 4 dw (32768-2048)
mmx_2047 times 4 dw 2047
mmx_minus_2048 times 4 dw (-2048)
zero times 4 dw 0

section .text

;
;===========================================================================

  ; Note: in order to saturate 'easily', we pre-multiply the quantizer
  ; by 4 ('psllw mm7, 2' below). The high word of (coeff[]*matrix[i]*quant)
  ; is then used to build a saturating mask: it is non-zero only when an
  ; overflow occurred. We thus avoid packing/unpacking toward double-word.
  ; Moreover, we perform the mult (matrix[i]*quant) first, instead of, e.g.,
  ; (coeff[i]*matrix[i]). This is less prone to overflow if coeff[] are not
  ; checked. Input ranges are: coeff in [-127,127], intra_matrix in [1..255],
  ; and quant in [1..31].
  ;
  ; The original loop is shown below; a scalar summary follows it:
  ;
%if 0
  movq mm0, [ecx+8*eax + 8*16]  ; mm0 = coeff[i]
  pxor mm1, mm1
  pcmpgtw mm1, mm0
  pxor mm0, mm1     ; change sign if negative
  psubw mm0, mm1    ; -> mm0 = abs(coeff[i]), mm1 = sign of coeff[i]

  movq mm2, mm7     ; mm2 = quant
  pmullw mm2, [intra_matrix + 8*eax + 8*16]   ; matrix[i]*quant

  movq mm6, mm2
  pmulhw mm2, mm0   ; high of coeff*(matrix*quant) (should be 0 if no overflow)
  pmullw mm0, mm6   ; low of coeff*(matrix*quant)

  pxor mm5, mm5
  pcmpgtw mm2, mm5  ; overflow?
  psrlw mm2, 5      ; =0 if no clamp, 2047 otherwise
  psrlw mm0, 5
  paddw mm0, mm1    ; start restoring sign
  por mm0, mm2      ; saturate to 2047 if needed
  pxor mm0, mm1     ; finish negating back

  movq [edx + 8*eax + 8*16], mm0   ; data[i]
  add eax, 1
%endif
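
  ; A scalar sketch of the AC path, derived from the note and the reference
  ; loop above (data[0] is overwritten afterwards by the DC path):
  ;
  ;   for (i = 0; i < 64; i++) {
  ;     int v = (abs(coeff[i]) * intra_matrix[i] * quant) >> 3;
  ;     data[i] = (coeff[i] < 0) ? -MIN(v, 2048) : MIN(v, 2047);
  ;   }
  ;
  ; With quant pre-multiplied by 4, the final 'psrlw xx, 5' implements the
  ; >> 3, and the saturating 'paddusw' of the overflow mask pins an
  ; overflowed word at 0xFFFF, i.e. 2047 after the shift.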

;********************************************************************

align 16
cglobal dequant4_intra_mmx
dequant4_intra_mmx:

  mov edx, [esp+4]    ; data
  mov ecx, [esp+8]    ; coeff
  mov eax, [esp+12]   ; quant

  movq mm7, [mmx_mul_quant + eax*8 - 8]
  mov eax, -16        ; to keep aligned, we regularly process coeff[0]
  psllw mm7, 2        ; << 2. See comment.
  pxor mm6, mm6       ; this is a NOP
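
  ; Indexing sketch: eax runs from -16 up to 0 in steps of 2, so
  ; [8*eax + 8*16] starts at coeff[0] and walks all 64 words in eight
  ; passes of two quadwords. The 'add eax, 2' inside the loop sets the
  ; zero flag on the last pass, tested by the trailing 'jnz'; the stores
  ; compensate for the early increment with their -2*8 offset.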

align 16
.loop
  movq mm0, [ecx+8*eax + 8*16]    ; mm0 = c  = coeff[i]
  movq mm3, [ecx+8*eax + 8*16 +8] ; mm3 = c' = coeff[i+1]
  pxor mm1, mm1
  pxor mm4, mm4
  pcmpgtw mm1, mm0  ; mm1 = sgn(c)
  movq mm2, mm7     ; mm2 = quant

  pcmpgtw mm4, mm3  ; mm4 = sgn(c')
  pmullw mm2, [intra_matrix + 8*eax + 8*16]   ; matrix[i]*quant

  pxor mm0, mm1     ; negate if negative
  pxor mm3, mm4     ; negate if negative

  psubw mm0, mm1
  psubw mm3, mm4

  ; we're short on registers, here. Poor pairing...

  movq mm5, mm2
  pmullw mm2, mm0   ; low of coeff*(matrix*quant)

  pmulhw mm0, mm5   ; high of coeff*(matrix*quant)
  movq mm5, mm7     ; mm5 = quant

  pmullw mm5, [intra_matrix + 8*eax + 8*16 +8]   ; matrix[i+1]*quant

  movq mm6, mm5
  add eax, 2        ; z-flag will be tested later

  pmullw mm6, mm3   ; low of coeff'*(matrix*quant)
  pmulhw mm3, mm5   ; high of coeff'*(matrix*quant)

  pcmpgtw mm0, [zero]
  paddusw mm2, mm0
  psrlw mm2, 5

  pcmpgtw mm3, [zero]
  paddusw mm6, mm3
  psrlw mm6, 5

  pxor mm2, mm1     ; start negating back
  pxor mm6, mm4     ; start negating back

  psubusw mm1, mm0
  psubusw mm4, mm3

  psubw mm2, mm1    ; finish negating back
  psubw mm6, mm4    ; finish negating back

  movq [edx + 8*eax + 8*16 -2*8   ], mm2   ; data[i]
  movq [edx + 8*eax + 8*16 -2*8 +8], mm6   ; data[i+1]

  jnz near .loop


  ; deal with DC
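
  ; dc = coeff[0]*dcscalar, clamped to [-2048,2047] by saturating
  ; arithmetic: adding then subtracting (32767-2047) pins the high side
  ; at 2047, and subtracting then adding (32768-2048) pins the low side
  ; at -2048.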

  movd mm0, [ecx]
  pmullw mm0, [esp+16]   ; dcscalar
  movq mm2, [mmx_32767_minus_2047]
  paddsw mm0, mm2
  psubsw mm0, mm2
  movq mm2, [mmx_32768_minus_2048]
  psubsw mm0, mm2
  paddsw mm0, mm2
  movd eax, mm0
  mov [edx], ax

  ret

;===========================================================================
;
; void dequant4_inter_mmx(int16_t * data,
;                         const int16_t * const coeff,
;                         const uint32_t quant);
;
;===========================================================================

  ; Note: We use (2*c + sgn(c) - sgn(-c)) as multiplier
  ; so we handle the 3 cases: c<0, c==0, and c>0 in one shot.
  ; sgn(x) is the result of 'pcmpgtw 0,x': 0 if x>=0, -1 if x<0.
  ; It's mixed with the extraction of the absolute value.
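  ;
  ; Checking the three cases of the multiplier m = 2*c + sgn(c) - sgn(-c):
  ;   c > 0:  m = 2*c + 0 - (-1) = 2*c + 1
  ;   c = 0:  m = 0
  ;   c < 0:  m = 2*c + (-1) - 0 = -(2*|c| + 1)
  ; With quant pre-doubled ('paddw mm7, mm7') and the final 'psrlw xx, 5',
  ; each output is thus +/-((2*|c| + 1)*inter_matrix[i]*quant) >> 4, and
  ; zero coefficients stay zero.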

align 16
cglobal dequant4_inter_mmx
dequant4_inter_mmx:

  mov edx, [esp+ 4]   ; data
  mov ecx, [esp+ 8]   ; coeff
  mov eax, [esp+12]   ; quant
  movq mm7, [mmx_mul_quant + eax*8 - 8]
  mov eax, -16
  paddw mm7, mm7      ; << 1
  pxor mm6, mm6       ; mismatch sum

align 16
.loop
  movq mm0, [ecx+8*eax + 8*16   ]   ; mm0 = coeff[i]
  movq mm2, [ecx+8*eax + 8*16 +8]   ; mm2 = coeff[i+1]
  add eax, 2

  pxor mm1, mm1
  pxor mm3, mm3
  pcmpgtw mm1, mm0  ; mm1 = sgn(c)  (preserved)
  pcmpgtw mm3, mm2  ; mm3 = sgn(c') (preserved)
  paddsw mm0, mm1   ; c += sgn(c)
  paddsw mm2, mm3   ; c' += sgn(c')
  paddw mm0, mm0    ; c *= 2
  paddw mm2, mm2    ; c' *= 2

  pxor mm4, mm4
  pxor mm5, mm5
  psubw mm4, mm0    ; -c
  psubw mm5, mm2    ; -c'
  psraw mm4, 16     ; mm4 = sgn(-c)
  psraw mm5, 16     ; mm5 = sgn(-c')
  psubsw mm0, mm4   ; c -= sgn(-c)
  psubsw mm2, mm5   ; c' -= sgn(-c')
  pxor mm0, mm1     ; finish changing sign if needed
  pxor mm2, mm3     ; finish changing sign if needed

  ; we're short on registers, here. Poor pairing...

  movq mm4, mm7     ; (matrix*quant)
  pmullw mm4, [inter_matrix + 8*eax + 8*16 -2*8]
  movq mm5, mm4
  pmulhw mm5, mm0   ; high of c*(matrix*quant)
  pmullw mm0, mm4   ; low of c*(matrix*quant)

  movq mm4, mm7     ; (matrix*quant)
  pmullw mm4, [inter_matrix + 8*eax + 8*16 -2*8 + 8]

  pcmpgtw mm5, [zero]
  paddusw mm0, mm5
  psrlw mm0, 5
  pxor mm0, mm1     ; start restoring sign
  psubusw mm1, mm5

  movq mm5, mm4
  pmulhw mm5, mm2   ; high of c'*(matrix*quant)
  pmullw mm2, mm4   ; low of c'*(matrix*quant)
  psubw mm0, mm1    ; finish restoring sign

  pcmpgtw mm5, [zero]
  paddusw mm2, mm5
  psrlw mm2, 5
  pxor mm2, mm3     ; start restoring sign
  psubusw mm3, mm5
  psubw mm2, mm3    ; finish restoring sign

  pxor mm6, mm0     ; mismatch control
  movq [edx + 8*eax + 8*16 -2*8   ], mm0   ; data[i]
  pxor mm6, mm2     ; mismatch control
  movq [edx + 8*eax + 8*16 -2*8 +8], mm2   ; data[i+1]

  jnz near .loop

  ; mismatch control
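
  ; mm6 has accumulated the XOR of every word stored to data[], so after
  ; folding its four words together, bit 0 of eax is the parity of the
  ; coefficient sum. The MPEG-style mismatch rule wants that sum odd:
  ; when the parity is even, toggle the LSB of the last coefficient,
  ; data[63].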

  movq mm0, mm6
  psrlq mm0, 48
  movq mm1, mm6
  movq mm2, mm6
  psrlq mm1, 32
  pxor mm6, mm0
  psrlq mm2, 16
  pxor mm6, mm1
  pxor mm6, mm2
  movd eax, mm6
  and eax, 1
  xor eax, 1
  xor word [edx + 2*63], ax

  ret