--- branches/dev-api-4/xvidcore/src/quant/x86_asm/quantize_mpeg_mmx.asm 2003/10/07 14:16:29 1175
+++ branches/dev-api-4/xvidcore/src/quant/x86_asm/quantize_mpeg_mmx.asm 2003/10/09 18:50:22 1176
@@ -1,51 +1,29 @@
-;/******************************************************************************
-; * *
-; * This file is part of XviD, a free MPEG-4 video encoder/decoder *
-; * *
-; * XviD is an implementation of a part of one or more MPEG-4 Video tools *
-; * as specified in ISO/IEC 14496-2 standard. Those intending to use this *
-; * software module in hardware or software products are advised that its *
-; * use may infringe existing patents or copyrights, and any such use *
-; * would be at such party's own risk. The original developer of this *
-; * software module and his/her company, and subsequent editors and their *
-; * companies, will have no liability for use of this software or *
-; * modifications or derivatives thereof. *
-; * *
-; * XviD is free software; you can redistribute it and/or modify it *
-; * under the terms of the GNU General Public License as published by *
-; * the Free Software Foundation; either version 2 of the License, or *
-; * (at your option) any later version. *
-; * *
-; * XviD is distributed in the hope that it will be useful, but *
-; * WITHOUT ANY WARRANTY; without even the implied warranty of *
-; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
-; * GNU General Public License for more details. *
-; * *
-; * You should have received a copy of the GNU General Public License *
-; * along with this program; if not, write to the Free Software *
-; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA *
-; * *
-; ******************************************************************************/
-;
-;/******************************************************************************
-; * *
-; * quantize4.asm, MMX optimized MPEG quantization/dequantization *
-; * *
-; * Copyright (C) 2002 - Peter Ross *
-; * Copyright (C) 2002 - Michael Militzer *
-; * *
-; * For more information visit the XviD homepage: http://www.xvid.org *
-; * *
-; ******************************************************************************/
-;
-;/******************************************************************************
-; * *
-; * Revision history: *
-; * *
-; * 14.06.2002 mmx dequant4_* funcs revamped -Skal- *
-; * 22.01.2002 initial version *
-; * *
-; ******************************************************************************/
+;/**************************************************************************
+; *
+; * XVID MPEG-4 VIDEO CODEC
+; * - 3dne Quantization/Dequantization -
+; *
+; * Copyright (C) 2002-2003 Peter Ross
+; * 2002-2003 Michael Militzer
+; * 2002-2003 Pascal Massimino
+; *
+; * This program is free software ; you can redistribute it and/or modify
+; * it under the terms of the GNU General Public License as published by
+; * the Free Software Foundation ; either version 2 of the License, or
+; * (at your option) any later version.
+; *
+; * This program is distributed in the hope that it will be useful,
+; * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+; * GNU General Public License for more details.
+; * +; * You should have received a copy of the GNU General Public License +; * along with this program ; if not, write to the Free Software +; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +; * +; * $Id: quantize_mpeg_mmx.asm,v 1.1.2.2 2003-10-09 18:50:22 edgomez Exp $ +; * +; *************************************************************************/ ; data/text alignment %define ALIGN 8 @@ -54,13 +32,6 @@ bits 32 -%ifdef FORMAT_COFF -SECTION .data data -%else -SECTION .data data align=8 -%endif - - %macro cglobal 1 %ifdef PREFIX global _%1 @@ -79,7 +50,18 @@ %endif %endmacro -mmx_one times 4 dw 1 +;*************************************************************************** +; Local data +;*************************************************************************** + +%ifdef FORMAT_COFF +SECTION .data data +%else +SECTION .data data align=8 +%endif + +mmx_one: + times 4 dw 1 ;=========================================================================== ; @@ -87,44 +69,13 @@ ; ;=========================================================================== -%macro MMX_DIV 1 -times 4 dw (1 << 17) / (%1 * 2) + 1 -%endmacro - -align ALIGN -mmx_div - MMX_DIV 1 - MMX_DIV 2 - MMX_DIV 3 - MMX_DIV 4 - MMX_DIV 5 - MMX_DIV 6 - MMX_DIV 7 - MMX_DIV 8 - MMX_DIV 9 - MMX_DIV 10 - MMX_DIV 11 - MMX_DIV 12 - MMX_DIV 13 - MMX_DIV 14 - MMX_DIV 15 - MMX_DIV 16 - MMX_DIV 17 - MMX_DIV 18 - MMX_DIV 19 - MMX_DIV 20 - MMX_DIV 21 - MMX_DIV 22 - MMX_DIV 23 - MMX_DIV 24 - MMX_DIV 25 - MMX_DIV 26 - MMX_DIV 27 - MMX_DIV 28 - MMX_DIV 29 - MMX_DIV 30 - MMX_DIV 31 - +align 16 +mmx_div: +%assign quant 1 +%rep 31 + times 4 dw (1<<17) / (quant*2) + 1 + %assign quant quant+1 +%endrep ;=========================================================================== ; @@ -155,43 +106,12 @@ ; ;=========================================================================== -%macro MMX_QUANTD 1 -times 4 dw ((VM18P*%1) + (VM18Q/2)) / VM18Q -%endmacro - -quantd - MMX_QUANTD 1 - MMX_QUANTD 2 - MMX_QUANTD 3 - MMX_QUANTD 4 - MMX_QUANTD 5 - MMX_QUANTD 6 - MMX_QUANTD 7 - MMX_QUANTD 8 - MMX_QUANTD 9 - MMX_QUANTD 10 - MMX_QUANTD 11 - MMX_QUANTD 12 - MMX_QUANTD 13 - MMX_QUANTD 14 - MMX_QUANTD 15 - MMX_QUANTD 16 - MMX_QUANTD 17 - MMX_QUANTD 18 - MMX_QUANTD 19 - MMX_QUANTD 20 - MMX_QUANTD 21 - MMX_QUANTD 22 - MMX_QUANTD 23 - MMX_QUANTD 24 - MMX_QUANTD 25 - MMX_QUANTD 26 - MMX_QUANTD 27 - MMX_QUANTD 28 - MMX_QUANTD 29 - MMX_QUANTD 30 - MMX_QUANTD 31 - +quantd: +%assign quant 1 +%rep 31 + times 4 dw ((VM18P*quant) + (VM18Q/2)) / VM18Q + %assign quant quant+1 +%endrep ;=========================================================================== ; @@ -199,42 +119,12 @@ ; ;=========================================================================== -%macro MMX_MUL_QUANT 1 -times 4 dw %1 -%endmacro - -mmx_mul_quant - MMX_MUL_QUANT 1 - MMX_MUL_QUANT 2 - MMX_MUL_QUANT 3 - MMX_MUL_QUANT 4 - MMX_MUL_QUANT 5 - MMX_MUL_QUANT 6 - MMX_MUL_QUANT 7 - MMX_MUL_QUANT 8 - MMX_MUL_QUANT 9 - MMX_MUL_QUANT 10 - MMX_MUL_QUANT 11 - MMX_MUL_QUANT 12 - MMX_MUL_QUANT 13 - MMX_MUL_QUANT 14 - MMX_MUL_QUANT 15 - MMX_MUL_QUANT 16 - MMX_MUL_QUANT 17 - MMX_MUL_QUANT 18 - MMX_MUL_QUANT 19 - MMX_MUL_QUANT 20 - MMX_MUL_QUANT 21 - MMX_MUL_QUANT 22 - MMX_MUL_QUANT 23 - MMX_MUL_QUANT 24 - MMX_MUL_QUANT 25 - MMX_MUL_QUANT 26 - MMX_MUL_QUANT 27 - MMX_MUL_QUANT 28 - MMX_MUL_QUANT 29 - MMX_MUL_QUANT 30 - MMX_MUL_QUANT 31 +mmx_mul_quant: +%assign quant 1 +%rep 31 + times 4 dw quant + %assign quant quant+1 +%endrep 
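
For reference: the mmx_div entries above are fixed-point reciprocals of 2*quant. In the quantization loops below, pmulhw keeps the high 16 bits of the signed 16x16 product and the following psrlw by 1 adds one more shift, so the net operation is (v * mmx_div[quant-1]) >> 17 - a cheap stand-in for v / (2*quant) that is never low and at most one high. A minimal C sketch of that arithmetic, assuming non-negative operands as the loops produce after the sign/displace step:

#include <stdio.h>

/* mmx_div[q-1] = (1<<17)/(2*q) + 1; pmulhw keeps the high 16 bits of the
 * product and the extra "psrlw 1" makes the total shift 17. */
static unsigned div_by_2q(unsigned v, unsigned quant)
{
    unsigned mult = (1 << 17) / (quant * 2) + 1;   /* table entry for this quant */
    return (v * mult) >> 17;                       /* == pmulhw + psrlw 1 */
}

int main(void)
{
    unsigned quant, v;

    /* quant 1 and 2 are excluded: their multipliers (65537, 32769) do not
     * fit a signed 16-bit word, hence the dedicated .q1loop/.q2loop paths. */
    for (quant = 3; quant <= 31; quant++)
        for (v = 0; v < 4096; v++)
            if (div_by_2q(v, quant) != v / (quant * 2))
                printf("q=%u v=%u: approx=%u exact=%u\n",
                       quant, v, div_by_2q(v, quant), v / (quant * 2));
    return 0;
}

Running it flags the handful of operands where the approximation rounds up by one; the quantizer tolerates that small upward bias.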
;=========================================================================== ; @@ -244,20 +134,29 @@ align 16 -mmx_32767_minus_2047 times 4 dw (32767-2047) -mmx_32768_minus_2048 times 4 dw (32768-2048) -mmx_2047 times 4 dw 2047 -mmx_minus_2048 times 4 dw (-2048) -zero times 4 dw 0 +mmx_32767_minus_2047: + times 4 dw (32767-2047) +mmx_32768_minus_2048: + times 4 dw (32768-2048) +mmx_2047: + times 4 dw 2047 +mmx_minus_2048: + times 4 dw (-2048) +zero: + times 4 dw 0 + +;*************************************************************************** +; Local data +;*************************************************************************** section .text ;=========================================================================== ; -; void quant_intra4_mmx(int16_t * coeff, -; const int16_t const * data, -; const uint32_t quant, -; const uint32_t dcscalar); +; uint32_t quant_mpeg_intra_mmx(int16_t * coeff, +; const int16_t const * data, +; const uint32_t quant, +; const uint32_t dcscalar); ; ;=========================================================================== @@ -265,215 +164,215 @@ cglobal quant_mpeg_intra_mmx quant_mpeg_intra_mmx: - push ecx - push esi - push edi + push ecx + push esi + push edi - mov edi, [esp + 12 + 4] ; coeff - mov esi, [esp + 12 + 8] ; data - mov eax, [esp + 12 + 12] ; quant + mov edi, [esp + 12 + 4] ; coeff + mov esi, [esp + 12 + 8] ; data + mov eax, [esp + 12 + 12] ; quant - movq mm5, [quantd + eax * 8 - 8] ; quantd -> mm5 + movq mm5, [quantd + eax * 8 - 8] ; quantd -> mm5 - xor ecx, ecx - cmp al, 1 - jz near .q1loop + xor ecx, ecx + cmp al, 1 + jz near .q1loop - cmp al, 2 - jz near .q2loop + cmp al, 2 + jz near .q2loop - movq mm7, [mmx_div + eax * 8 - 8] ; multipliers[quant] -> mm7 + movq mm7, [mmx_div + eax * 8 - 8] ; multipliers[quant] -> mm7 align ALIGN .loop - movq mm0, [esi + 8*ecx] ; mm0 = [1st] - movq mm3, [esi + 8*ecx + 8] ; - - pxor mm1, mm1 ; mm1 = 0 - pxor mm4, mm4 - - pcmpgtw mm1, mm0 ; mm1 = (0 > mm0) - pcmpgtw mm4, mm3 - - pxor mm0, mm1 ; mm0 = |mm0| - pxor mm3, mm4 ; - psubw mm0, mm1 ; displace - psubw mm3, mm4 ; - - psllw mm0, 4 ; level << 4 - psllw mm3, 4 ; + movq mm0, [esi + 8*ecx] ; mm0 = [1st] + movq mm3, [esi + 8*ecx + 8] ; - movq mm2, [intra_matrix + 8*ecx] - psrlw mm2, 1 ; intra_matrix[i]>>1 - paddw mm0, mm2 + pxor mm1, mm1 ; mm1 = 0 + pxor mm4, mm4 - movq mm2, [intra_matrix_fix + ecx*8] - pmulhw mm0, mm2 ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i] - - movq mm2, [intra_matrix + 8*ecx + 8] - psrlw mm2, 1 - paddw mm3, mm2 - - movq mm2, [intra_matrix_fix + ecx*8 + 8] - pmulhw mm3, mm2 - - paddw mm0, mm5 ; + quantd - paddw mm3, mm5 - - pmulhw mm0, mm7 ; mm0 = (mm0 / 2Q) >> 16 - pmulhw mm3, mm7 ; - psrlw mm0, 1 ; additional shift by 1 => 16 + 1 = 17 - psrlw mm3, 1 + pcmpgtw mm1, mm0 ; mm1 = (0 > mm0) + pcmpgtw mm4, mm3 - pxor mm0, mm1 ; mm0 *= sign(mm0) - pxor mm3, mm4 ; - psubw mm0, mm1 ; undisplace - psubw mm3, mm4 ; - - movq [edi + 8*ecx], mm0 - movq [edi + 8*ecx + 8], mm3 - - add ecx,2 - cmp ecx,16 - jnz near .loop + pxor mm0, mm1 ; mm0 = |mm0| + pxor mm3, mm4 ; + psubw mm0, mm1 ; displace + psubw mm3, mm4 ; + + psllw mm0, 4 ; level << 4 + psllw mm3, 4 ; + + movq mm2, [intra_matrix + 8*ecx] + psrlw mm2, 1 ; intra_matrix[i]>>1 + paddw mm0, mm2 + + movq mm2, [intra_matrix_fix + ecx*8] + pmulhw mm0, mm2 ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i] + + movq mm2, [intra_matrix + 8*ecx + 8] + psrlw mm2, 1 + paddw mm3, mm2 + + movq mm2, [intra_matrix_fix + ecx*8 + 8] + pmulhw mm3, mm2 + + paddw mm0, mm5 ; + quantd + paddw mm3, mm5 + 
pmulhw mm0, mm7 ; mm0 = (mm0 / 2Q) >> 16 + pmulhw mm3, mm7 ; + psrlw mm0, 1 ; additional shift by 1 => 16 + 1 = 17 + psrlw mm3, 1 + + pxor mm0, mm1 ; mm0 *= sign(mm0) + pxor mm3, mm4 ; + psubw mm0, mm1 ; undisplace + psubw mm3, mm4 ; + + movq [edi + 8*ecx], mm0 + movq [edi + 8*ecx + 8], mm3 + + add ecx,2 + cmp ecx,16 + jnz near .loop .done ; caclulate data[0] // (int32_t)dcscalar) - mov ecx, [esp + 12 + 16] ; dcscalar - mov edx, ecx - movsx eax, word [esi] ; data[0] - shr edx, 1 ; edx = dcscalar /2 - cmp eax, 0 - jg .gtzero + mov ecx, [esp + 12 + 16] ; dcscalar + mov edx, ecx + movsx eax, word [esi] ; data[0] + shr edx, 1 ; edx = dcscalar /2 + cmp eax, 0 + jg .gtzero - sub eax, edx - jmp short .mul + sub eax, edx + jmp short .mul .gtzero - add eax, edx + add eax, edx .mul - cdq ; expand eax -> edx:eax - idiv ecx ; eax = edx:eax / dcscalar + cdq ; expand eax -> edx:eax + idiv ecx ; eax = edx:eax / dcscalar - mov [edi], ax ; coeff[0] = ax + mov [edi], ax ; coeff[0] = ax - pop edi - pop esi - pop ecx + pop edi + pop esi + pop ecx - ret + xor eax, eax ; return(0); + ret align ALIGN .q1loop - movq mm0, [esi + 8*ecx] ; mm0 = [1st] - movq mm3, [esi + 8*ecx + 8] ; + movq mm0, [esi + 8*ecx] ; mm0 = [1st] + movq mm3, [esi + 8*ecx + 8] ; - pxor mm1, mm1 ; mm1 = 0 - pxor mm4, mm4 ; + pxor mm1, mm1 ; mm1 = 0 + pxor mm4, mm4 ; - pcmpgtw mm1, mm0 ; mm1 = (0 > mm0) - pcmpgtw mm4, mm3 ; + pcmpgtw mm1, mm0 ; mm1 = (0 > mm0) + pcmpgtw mm4, mm3 ; - pxor mm0, mm1 ; mm0 = |mm0| - pxor mm3, mm4 ; - psubw mm0, mm1 ; displace - psubw mm3, mm4 ; - - psllw mm0, 4 - psllw mm3, 4 - - movq mm2, [intra_matrix + 8*ecx] - psrlw mm2, 1 - paddw mm0, mm2 - - movq mm2, [intra_matrix_fix + ecx*8] - pmulhw mm0, mm2 ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i] - - movq mm2, [intra_matrix + 8*ecx + 8] - psrlw mm2, 1 - paddw mm3, mm2 - - movq mm2, [intra_matrix_fix + ecx*8 + 8] - pmulhw mm3, mm2 - - paddw mm0, mm5 - paddw mm3, mm5 - - psrlw mm0, 1 ; mm0 >>= 1 (/2) - psrlw mm3, 1 ; - - pxor mm0, mm1 ; mm0 *= sign(mm0) - pxor mm3, mm4 ; - psubw mm0, mm1 ; undisplace - psubw mm3, mm4 ; - - movq [edi + 8*ecx], mm0 - movq [edi + 8*ecx + 8], mm3 - - add ecx,2 - cmp ecx,16 - jnz near .q1loop - jmp near .done + pxor mm0, mm1 ; mm0 = |mm0| + pxor mm3, mm4 ; + psubw mm0, mm1 ; displace + psubw mm3, mm4 ; + + psllw mm0, 4 + psllw mm3, 4 + + movq mm2, [intra_matrix + 8*ecx] + psrlw mm2, 1 + paddw mm0, mm2 + + movq mm2, [intra_matrix_fix + ecx*8] + pmulhw mm0, mm2 ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i] + + movq mm2, [intra_matrix + 8*ecx + 8] + psrlw mm2, 1 + paddw mm3, mm2 + + movq mm2, [intra_matrix_fix + ecx*8 + 8] + pmulhw mm3, mm2 + + paddw mm0, mm5 + paddw mm3, mm5 + + psrlw mm0, 1 ; mm0 >>= 1 (/2) + psrlw mm3, 1 ; + + pxor mm0, mm1 ; mm0 *= sign(mm0) + pxor mm3, mm4 ; + psubw mm0, mm1 ; undisplace + psubw mm3, mm4 ; + + movq [edi + 8*ecx], mm0 + movq [edi + 8*ecx + 8], mm3 + + add ecx, 2 + cmp ecx, 16 + jnz near .q1loop + jmp near .done align ALIGN .q2loop - movq mm0, [esi + 8*ecx] ; mm0 = [1st] - movq mm3, [esi + 8*ecx + 8] ; + movq mm0, [esi + 8*ecx] ; mm0 = [1st] + movq mm3, [esi + 8*ecx + 8] ; - pxor mm1, mm1 ; mm1 = 0 - pxor mm4, mm4 ; + pxor mm1, mm1 ; mm1 = 0 + pxor mm4, mm4 ; - pcmpgtw mm1, mm0 ; mm1 = (0 > mm0) - pcmpgtw mm4, mm3 ; + pcmpgtw mm1, mm0 ; mm1 = (0 > mm0) + pcmpgtw mm4, mm3 ; - pxor mm0, mm1 ; mm0 = |mm0| - pxor mm3, mm4 ; - psubw mm0, mm1 ; displace - psubw mm3, mm4 ; + pxor mm0, mm1 ; mm0 = |mm0| + pxor mm3, mm4 ; + psubw mm0, mm1 ; displace + psubw mm3, mm4 ; - psllw mm0, 4 - psllw mm3, 4 + 
psllw mm0, 4 + psllw mm3, 4 - movq mm2, [intra_matrix + 8*ecx] - psrlw mm2, 1 - paddw mm0, mm2 + movq mm2, [intra_matrix + 8*ecx] + psrlw mm2, 1 + paddw mm0, mm2 - movq mm2, [intra_matrix_fix + ecx*8] - pmulhw mm0, mm2 ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i] + movq mm2, [intra_matrix_fix + ecx*8] + pmulhw mm0, mm2 ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i] - movq mm2, [intra_matrix + 8*ecx + 8] - psrlw mm2, 1 - paddw mm3, mm2 + movq mm2, [intra_matrix + 8*ecx + 8] + psrlw mm2, 1 + paddw mm3, mm2 - movq mm2, [intra_matrix_fix + ecx*8 + 8] - pmulhw mm3, mm2 + movq mm2, [intra_matrix_fix + ecx*8 + 8] + pmulhw mm3, mm2 - paddw mm0, mm5 - paddw mm3, mm5 + paddw mm0, mm5 + paddw mm3, mm5 - psrlw mm0, 2 ; mm0 >>= 1 (/4) - psrlw mm3, 2 ; + psrlw mm0, 2 ; mm0 >>= 1 (/4) + psrlw mm3, 2 ; - pxor mm0, mm1 ; mm0 *= sign(mm0) - pxor mm3, mm4 ; - psubw mm0, mm1 ; undisplace - psubw mm3, mm4 ; + pxor mm0, mm1 ; mm0 *= sign(mm0) + pxor mm3, mm4 ; + psubw mm0, mm1 ; undisplace + psubw mm3, mm4 ; - movq [edi + 8*ecx], mm0 - movq [edi + 8*ecx + 8], mm3 + movq [edi + 8*ecx], mm0 + movq [edi + 8*ecx + 8], mm3 - add ecx,2 - cmp ecx,16 - jnz near .q2loop - jmp near .done + add ecx,2 + cmp ecx,16 + jnz near .q2loop + jmp near .done ;=========================================================================== ; -; uint32_t quant4_inter_mmx(int16_t * coeff, -; const int16_t const * data, -; const uint32_t quant); +; uint32_t quant_mpeg_inter_mmx(int16_t * coeff, +; const int16_t const * data, +; const uint32_t quant); ; ;=========================================================================== @@ -481,199 +380,199 @@ cglobal quant_mpeg_inter_mmx quant_mpeg_inter_mmx: - push ecx - push esi - push edi + push ecx + push esi + push edi - mov edi, [esp + 12 + 4] ; coeff - mov esi, [esp + 12 + 8] ; data - mov eax, [esp + 12 + 12] ; quant + mov edi, [esp + 12 + 4] ; coeff + mov esi, [esp + 12 + 8] ; data + mov eax, [esp + 12 + 12] ; quant - xor ecx, ecx + xor ecx, ecx - pxor mm5, mm5 ; sum + pxor mm5, mm5 ; sum - cmp al, 1 - jz near .q1loop + cmp al, 1 + jz near .q1loop - cmp al, 2 - jz near .q2loop + cmp al, 2 + jz near .q2loop - movq mm7, [mmx_div + eax * 8 - 8] ; divider + movq mm7, [mmx_div + eax * 8 - 8] ; divider align ALIGN .loop - movq mm0, [esi + 8*ecx] ; mm0 = [1st] - movq mm3, [esi + 8*ecx + 8] ; - pxor mm1, mm1 ; mm1 = 0 - pxor mm4, mm4 ; - pcmpgtw mm1, mm0 ; mm1 = (0 > mm0) - pcmpgtw mm4, mm3 ; - pxor mm0, mm1 ; mm0 = |mm0| - pxor mm3, mm4 ; - psubw mm0, mm1 ; displace - psubw mm3, mm4 ; - - psllw mm0, 4 - psllw mm3, 4 - - movq mm2, [inter_matrix + 8*ecx] - psrlw mm2, 1 - paddw mm0, mm2 - - movq mm2, [inter_matrix_fix + ecx*8] - pmulhw mm0, mm2 ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i] - - movq mm2, [inter_matrix + 8*ecx + 8] - psrlw mm2, 1 - paddw mm3, mm2 - - movq mm2, [inter_matrix_fix + ecx*8 + 8] - pmulhw mm3, mm2 - - pmulhw mm0, mm7 ; mm0 = (mm0 / 2Q) >> 16 - pmulhw mm3, mm7 ; - psrlw mm0, 1 ; additional shift by 1 => 16 + 1 = 17 - psrlw mm3, 1 - - paddw mm5, mm0 ; sum += mm0 - pxor mm0, mm1 ; mm0 *= sign(mm0) - paddw mm5, mm3 ; - pxor mm3, mm4 ; - psubw mm0, mm1 ; undisplace - psubw mm3, mm4 - movq [edi + 8*ecx], mm0 - movq [edi + 8*ecx + 8], mm3 - - add ecx, 2 - cmp ecx, 16 - jnz near .loop + movq mm0, [esi + 8*ecx] ; mm0 = [1st] + movq mm3, [esi + 8*ecx + 8] ; + pxor mm1, mm1 ; mm1 = 0 + pxor mm4, mm4 ; + pcmpgtw mm1, mm0 ; mm1 = (0 > mm0) + pcmpgtw mm4, mm3 ; + pxor mm0, mm1 ; mm0 = |mm0| + pxor mm3, mm4 ; + psubw mm0, mm1 ; displace + psubw mm3, mm4 ; + + psllw mm0, 4 
+ psllw mm3, 4 + + movq mm2, [inter_matrix + 8*ecx] + psrlw mm2, 1 + paddw mm0, mm2 + + movq mm2, [inter_matrix_fix + ecx*8] + pmulhw mm0, mm2 ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i] + + movq mm2, [inter_matrix + 8*ecx + 8] + psrlw mm2, 1 + paddw mm3, mm2 + + movq mm2, [inter_matrix_fix + ecx*8 + 8] + pmulhw mm3, mm2 + + pmulhw mm0, mm7 ; mm0 = (mm0 / 2Q) >> 16 + pmulhw mm3, mm7 ; + psrlw mm0, 1 ; additional shift by 1 => 16 + 1 = 17 + psrlw mm3, 1 + + paddw mm5, mm0 ; sum += mm0 + pxor mm0, mm1 ; mm0 *= sign(mm0) + paddw mm5, mm3 ; + pxor mm3, mm4 ; + psubw mm0, mm1 ; undisplace + psubw mm3, mm4 + movq [edi + 8*ecx], mm0 + movq [edi + 8*ecx + 8], mm3 + + add ecx, 2 + cmp ecx, 16 + jnz near .loop .done - pmaddwd mm5, [mmx_one] - movq mm0, mm5 - psrlq mm5, 32 - paddd mm0, mm5 - movd eax, mm0 ; return sum - - pop edi - pop esi - pop ecx + pmaddwd mm5, [mmx_one] + movq mm0, mm5 + psrlq mm5, 32 + paddd mm0, mm5 + movd eax, mm0 ; return sum + + pop edi + pop esi + pop ecx - ret + ret align ALIGN .q1loop - movq mm0, [esi + 8*ecx] ; mm0 = [1st] - movq mm3, [esi + 8*ecx+ 8] + movq mm0, [esi + 8*ecx] ; mm0 = [1st] + movq mm3, [esi + 8*ecx+ 8] ; - pxor mm1, mm1 ; mm1 = 0 - pxor mm4, mm4 ; + pxor mm1, mm1 ; mm1 = 0 + pxor mm4, mm4 ; - pcmpgtw mm1, mm0 ; mm1 = (0 > mm0) - pcmpgtw mm4, mm3 ; + pcmpgtw mm1, mm0 ; mm1 = (0 > mm0) + pcmpgtw mm4, mm3 ; - pxor mm0, mm1 ; mm0 = |mm0| - pxor mm3, mm4 ; - psubw mm0, mm1 ; displace - psubw mm3, mm4 ; - - psllw mm0, 4 - psllw mm3, 4 + pxor mm0, mm1 ; mm0 = |mm0| + pxor mm3, mm4 ; + psubw mm0, mm1 ; displace + psubw mm3, mm4 ; - movq mm2, [inter_matrix + 8*ecx] - psrlw mm2, 1 - paddw mm0, mm2 + psllw mm0, 4 + psllw mm3, 4 - movq mm2, [inter_matrix_fix + ecx*8] - pmulhw mm0, mm2 ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i] + movq mm2, [inter_matrix + 8*ecx] + psrlw mm2, 1 + paddw mm0, mm2 - movq mm2, [inter_matrix + 8*ecx + 8] - psrlw mm2, 1 - paddw mm3, mm2 + movq mm2, [inter_matrix_fix + ecx*8] + pmulhw mm0, mm2 ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i] - movq mm2, [inter_matrix_fix + ecx*8 + 8] - pmulhw mm3, mm2 + movq mm2, [inter_matrix + 8*ecx + 8] + psrlw mm2, 1 + paddw mm3, mm2 + + movq mm2, [inter_matrix_fix + ecx*8 + 8] + pmulhw mm3, mm2 - psrlw mm0, 1 ; mm0 >>= 1 (/2) - psrlw mm3, 1 ; + psrlw mm0, 1 ; mm0 >>= 1 (/2) + psrlw mm3, 1 ; - paddw mm5, mm0 ; sum += mm0 - pxor mm0, mm1 ; mm0 *= sign(mm0) - paddw mm5, mm3 ; - pxor mm3, mm4 ; - psubw mm0, mm1 ; undisplace - psubw mm3, mm4 - - movq [edi + 8*ecx], mm0 - movq [edi + 8*ecx + 8], mm3 - - add ecx,2 - cmp ecx,16 - jnz near .q1loop + paddw mm5, mm0 ; sum += mm0 + pxor mm0, mm1 ; mm0 *= sign(mm0) + paddw mm5, mm3 ; + pxor mm3, mm4 ; + psubw mm0, mm1 ; undisplace + psubw mm3, mm4 + + movq [edi + 8*ecx], mm0 + movq [edi + 8*ecx + 8], mm3 + + add ecx, 2 + cmp ecx, 16 + jnz near .q1loop - jmp .done + jmp .done align ALIGN .q2loop - movq mm0, [esi + 8*ecx] ; mm0 = [1st] - movq mm3, [esi + 8*ecx+ 8] + movq mm0, [esi + 8*ecx] ; mm0 = [1st] + movq mm3, [esi + 8*ecx+ 8] ; - pxor mm1, mm1 ; mm1 = 0 - pxor mm4, mm4 ; + pxor mm1, mm1 ; mm1 = 0 + pxor mm4, mm4 ; - pcmpgtw mm1, mm0 ; mm1 = (0 > mm0) - pcmpgtw mm4, mm3 ; + pcmpgtw mm1, mm0 ; mm1 = (0 > mm0) + pcmpgtw mm4, mm3 ; - pxor mm0, mm1 ; mm0 = |mm0| - pxor mm3, mm4 ; - psubw mm0, mm1 ; displace - psubw mm3, mm4 ; + pxor mm0, mm1 ; mm0 = |mm0| + pxor mm3, mm4 ; + psubw mm0, mm1 ; displace + psubw mm3, mm4 ; - psllw mm0, 4 - psllw mm3, 4 + psllw mm0, 4 + psllw mm3, 4 - movq mm2, [inter_matrix + 8*ecx] - psrlw mm2, 1 - paddw mm0, mm2 + 
movq mm2, [inter_matrix + 8*ecx] + psrlw mm2, 1 + paddw mm0, mm2 - movq mm2, [inter_matrix_fix + ecx*8] - pmulhw mm0, mm2 ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i] + movq mm2, [inter_matrix_fix + ecx*8] + pmulhw mm0, mm2 ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i] - movq mm2, [inter_matrix + 8*ecx + 8] - psrlw mm2, 1 - paddw mm3, mm2 + movq mm2, [inter_matrix + 8*ecx + 8] + psrlw mm2, 1 + paddw mm3, mm2 - movq mm2, [inter_matrix_fix + ecx*8 + 8] - pmulhw mm3, mm2 + movq mm2, [inter_matrix_fix + ecx*8 + 8] + pmulhw mm3, mm2 - psrlw mm0, 2 ; mm0 >>= 1 (/2) - psrlw mm3, 2 ; + psrlw mm0, 2 ; mm0 >>= 1 (/2) + psrlw mm3, 2 ; - paddw mm5, mm0 ; sum += mm0 - pxor mm0, mm1 ; mm0 *= sign(mm0) - paddw mm5, mm3 ; - pxor mm3, mm4 ; - psubw mm0, mm1 ; undisplace - psubw mm3, mm4 + paddw mm5, mm0 ; sum += mm0 + pxor mm0, mm1 ; mm0 *= sign(mm0) + paddw mm5, mm3 ; + pxor mm3, mm4 ; + psubw mm0, mm1 ; undisplace + psubw mm3, mm4 - movq [edi + 8*ecx], mm0 - movq [edi + 8*ecx + 8], mm3 + movq [edi + 8*ecx], mm0 + movq [edi + 8*ecx + 8], mm3 - add ecx,2 - cmp ecx,16 - jnz near .q2loop + add ecx, 2 + cmp ecx, 16 + jnz near .q2loop - jmp .done + jmp .done ;=========================================================================== ; -; void dequant4_intra_mmx(int16_t *data, -; const int16_t const *coeff, -; const uint32_t quant, -; const uint32_t dcscalar); +; uint32_t dequant_mpeg_intra_mmx(int16_t *data, +; const int16_t const *coeff, +; const uint32_t quant, +; const uint32_t dcscalar); ; ;=========================================================================== @@ -689,29 +588,29 @@ ; The original loop is: ; %if 0 - movq mm0, [ecx+8*eax + 8*16] ; mm0 = coeff[i] - pxor mm1, mm1 - pcmpgtw mm1, mm0 - pxor mm0, mm1 ; change sign if negative - psubw mm0, mm1 ; -> mm0 = abs(coeff[i]), mm1 = sign of coeff[i] - - movq mm2, mm7 ; mm2 = quant - pmullw mm2, [intra_matrix + 8*eax + 8*16 ] ; matrix[i]*quant. - - movq mm6, mm2 - pmulhw mm2, mm0 ; high of coeff*(matrix*quant) (should be 0 if no overflow) - pmullw mm0, mm6 ; low of coeff*(matrix*quant) - - pxor mm5, mm5 - pcmpgtw mm2, mm5 ; otherflow? - psrlw mm2, 5 ; =0 if no clamp, 2047 otherwise - psrlw mm0, 5 - paddw mm0, mm1 ; start restoring sign - por mm0, mm2 ; saturate to 2047 if needed - pxor mm0, mm1 ; finish negating back + movq mm0, [ecx+8*eax + 8*16] ; mm0 = coeff[i] + pxor mm1, mm1 + pcmpgtw mm1, mm0 + pxor mm0, mm1 ; change sign if negative + psubw mm0, mm1 ; -> mm0 = abs(coeff[i]), mm1 = sign of coeff[i] + + movq mm2, mm7 ; mm2 = quant + pmullw mm2, [intra_matrix + 8*eax + 8*16 ] ; matrix[i]*quant. + + movq mm6, mm2 + pmulhw mm2, mm0 ; high of coeff*(matrix*quant) (should be 0 if no overflow) + pmullw mm0, mm6 ; low of coeff*(matrix*quant) + + pxor mm5, mm5 + pcmpgtw mm2, mm5 ; otherflow? + psrlw mm2, 5 ; =0 if no clamp, 2047 otherwise + psrlw mm0, 5 + paddw mm0, mm1 ; start restoring sign + por mm0, mm2 ; saturate to 2047 if needed + pxor mm0, mm1 ; finish negating back - movq [edx + 8*eax + 8*16], mm0 ; data[i] - add eax, 1 + movq [edx + 8*eax + 8*16], mm0 ; data[i] + add eax, 1 %endif ;******************************************************************** @@ -720,91 +619,92 @@ cglobal dequant_mpeg_intra_mmx dequant_mpeg_intra_mmx: - mov edx, [esp+4] ; data - mov ecx, [esp+8] ; coeff - mov eax, [esp+12] ; quant - - movq mm7, [mmx_mul_quant + eax*8 - 8] - mov eax, -16 ; to keep aligned, we regularly process coeff[0] - psllw mm7, 2 ; << 2. See comment. 
- pxor mm6, mm6 ; this is a NOP + mov edx, [esp+4] ; data + mov ecx, [esp+8] ; coeff + mov eax, [esp+12] ; quant + + movq mm7, [mmx_mul_quant + eax*8 - 8] + mov eax, -16 ; to keep aligned, we regularly process coeff[0] + psllw mm7, 2 ; << 2. See comment. + pxor mm6, mm6 ; this is a NOP align 16 .loop - movq mm0, [ecx+8*eax + 8*16] ; mm0 = c = coeff[i] - movq mm3, [ecx+8*eax + 8*16 +8]; mm3 = c' = coeff[i+1] - pxor mm1, mm1 - pxor mm4, mm4 - pcmpgtw mm1, mm0 ; mm1 = sgn(c) - movq mm2, mm7 ; mm2 = quant + movq mm0, [ecx+8*eax + 8*16] ; mm0 = c = coeff[i] + movq mm3, [ecx+8*eax + 8*16 +8]; mm3 = c' = coeff[i+1] + pxor mm1, mm1 + pxor mm4, mm4 + pcmpgtw mm1, mm0 ; mm1 = sgn(c) + movq mm2, mm7 ; mm2 = quant - pcmpgtw mm4, mm3 ; mm4 = sgn(c') - pmullw mm2, [intra_matrix + 8*eax + 8*16 ] ; matrix[i]*quant + pcmpgtw mm4, mm3 ; mm4 = sgn(c') + pmullw mm2, [intra_matrix + 8*eax + 8*16 ] ; matrix[i]*quant - pxor mm0, mm1 ; negate if negative - pxor mm3, mm4 ; negate if negative + pxor mm0, mm1 ; negate if negative + pxor mm3, mm4 ; negate if negative - psubw mm0, mm1 - psubw mm3, mm4 + psubw mm0, mm1 + psubw mm3, mm4 ; we're short on register, here. Poor pairing... - movq mm5, mm2 - pmullw mm2, mm0 ; low of coeff*(matrix*quant) + movq mm5, mm2 + pmullw mm2, mm0 ; low of coeff*(matrix*quant) - pmulhw mm0, mm5 ; high of coeff*(matrix*quant) - movq mm5, mm7 ; mm2 = quant + pmulhw mm0, mm5 ; high of coeff*(matrix*quant) + movq mm5, mm7 ; mm2 = quant - pmullw mm5, [intra_matrix + 8*eax + 8*16 +8] ; matrix[i+1]*quant + pmullw mm5, [intra_matrix + 8*eax + 8*16 +8] ; matrix[i+1]*quant - movq mm6, mm5 - add eax,2 ; z-flag will be tested later + movq mm6, mm5 + add eax,2 ; z-flag will be tested later - pmullw mm6, mm3 ; low of coeff*(matrix*quant) - pmulhw mm3, mm5 ; high of coeff*(matrix*quant) + pmullw mm6, mm3 ; low of coeff*(matrix*quant) + pmulhw mm3, mm5 ; high of coeff*(matrix*quant) - pcmpgtw mm0, [zero] - paddusw mm2, mm0 - psrlw mm2, 5 + pcmpgtw mm0, [zero] + paddusw mm2, mm0 + psrlw mm2, 5 - pcmpgtw mm3, [zero] - paddusw mm6, mm3 - psrlw mm6, 5 + pcmpgtw mm3, [zero] + paddusw mm6, mm3 + psrlw mm6, 5 - pxor mm2, mm1 ; start negating back - pxor mm6, mm4 ; start negating back + pxor mm2, mm1 ; start negating back + pxor mm6, mm4 ; start negating back - psubusw mm1, mm0 - psubusw mm4, mm3 + psubusw mm1, mm0 + psubusw mm4, mm3 - psubw mm2, mm1 ; finish negating back - psubw mm6, mm4 ; finish negating back + psubw mm2, mm1 ; finish negating back + psubw mm6, mm4 ; finish negating back - movq [edx + 8*eax + 8*16 -2*8 ], mm2 ; data[i] - movq [edx + 8*eax + 8*16 -2*8 +8], mm6 ; data[i+1] + movq [edx + 8*eax + 8*16 -2*8 ], mm2 ; data[i] + movq [edx + 8*eax + 8*16 -2*8 +8], mm6 ; data[i+1] - jnz near .loop + jnz near .loop ; deal with DC - movd mm0, [ecx] - pmullw mm0, [esp+16] ; dcscalar - movq mm2, [mmx_32767_minus_2047] - paddsw mm0, mm2 - psubsw mm0, mm2 - movq mm2, [mmx_32768_minus_2048] - psubsw mm0, mm2 - paddsw mm0, mm2 - movd eax, mm0 - mov [edx], ax - - ret + movd mm0, [ecx] + pmullw mm0, [esp+16] ; dcscalar + movq mm2, [mmx_32767_minus_2047] + paddsw mm0, mm2 + psubsw mm0, mm2 + movq mm2, [mmx_32768_minus_2048] + psubsw mm0, mm2 + paddsw mm0, mm2 + movd eax, mm0 + mov [edx], ax + + xor eax, eax + ret ;=========================================================================== ; -; void dequant4_inter_mmx(int16_t * data, -; const int16_t * const coeff, -; const uint32_t quant); +; uint32_t dequant_mpeg_inter_mmx(int16_t * data, +; const int16_t * const coeff, +; const uint32_t quant); ; 
;=========================================================================== @@ -817,91 +717,91 @@ cglobal dequant_mpeg_inter_mmx dequant_mpeg_inter_mmx: - mov edx, [esp+ 4] ; data - mov ecx, [esp+ 8] ; coeff - mov eax, [esp+12] ; quant - movq mm7, [mmx_mul_quant + eax*8 - 8] - mov eax, -16 - paddw mm7, mm7 ; << 1 - pxor mm6, mm6 ; mismatch sum + mov edx, [esp+ 4] ; data + mov ecx, [esp+ 8] ; coeff + mov eax, [esp+12] ; quant + movq mm7, [mmx_mul_quant + eax*8 - 8] + mov eax, -16 + paddw mm7, mm7 ; << 1 + pxor mm6, mm6 ; mismatch sum align 16 .loop - movq mm0, [ecx+8*eax + 8*16 ] ; mm0 = coeff[i] - movq mm2, [ecx+8*eax + 8*16 +8] ; mm2 = coeff[i+1] - add eax,2 - - pxor mm1, mm1 - pxor mm3, mm3 - pcmpgtw mm1, mm0 ; mm1 = sgn(c) (preserved) - pcmpgtw mm3, mm2 ; mm3 = sgn(c') (preserved) - paddsw mm0, mm1 ; c += sgn(c) - paddsw mm2, mm3 ; c += sgn(c') - paddw mm0, mm0 ; c *= 2 - paddw mm2, mm2 ; c'*= 2 - - pxor mm4, mm4 - pxor mm5, mm5 - psubw mm4, mm0 ; -c - psubw mm5, mm2 ; -c' - psraw mm4, 16 ; mm4 = sgn(-c) - psraw mm5, 16 ; mm5 = sgn(-c') - psubsw mm0, mm4 ; c -= sgn(-c) - psubsw mm2, mm5 ; c' -= sgn(-c') - pxor mm0, mm1 ; finish changing sign if needed - pxor mm2, mm3 ; finish changing sign if needed + movq mm0, [ecx+8*eax + 8*16 ] ; mm0 = coeff[i] + movq mm2, [ecx+8*eax + 8*16 +8] ; mm2 = coeff[i+1] + add eax, 2 + + pxor mm1, mm1 + pxor mm3, mm3 + pcmpgtw mm1, mm0 ; mm1 = sgn(c) (preserved) + pcmpgtw mm3, mm2 ; mm3 = sgn(c') (preserved) + paddsw mm0, mm1 ; c += sgn(c) + paddsw mm2, mm3 ; c += sgn(c') + paddw mm0, mm0 ; c *= 2 + paddw mm2, mm2 ; c'*= 2 + + pxor mm4, mm4 + pxor mm5, mm5 + psubw mm4, mm0 ; -c + psubw mm5, mm2 ; -c' + psraw mm4, 16 ; mm4 = sgn(-c) + psraw mm5, 16 ; mm5 = sgn(-c') + psubsw mm0, mm4 ; c -= sgn(-c) + psubsw mm2, mm5 ; c' -= sgn(-c') + pxor mm0, mm1 ; finish changing sign if needed + pxor mm2, mm3 ; finish changing sign if needed ; we're short on register, here. Poor pairing... 
- movq mm4, mm7 ; (matrix*quant) - pmullw mm4, [inter_matrix + 8*eax + 8*16 -2*8] - movq mm5, mm4 - pmulhw mm5, mm0 ; high of c*(matrix*quant) - pmullw mm0, mm4 ; low of c*(matrix*quant) - - movq mm4, mm7 ; (matrix*quant) - pmullw mm4, [inter_matrix + 8*eax + 8*16 -2*8 + 8] - - pcmpgtw mm5, [zero] - paddusw mm0, mm5 - psrlw mm0, 5 - pxor mm0, mm1 ; start restoring sign - psubusw mm1, mm5 - - movq mm5, mm4 - pmulhw mm5, mm2 ; high of c*(matrix*quant) - pmullw mm2, mm4 ; low of c*(matrix*quant) - psubw mm0, mm1 ; finish restoring sign - - pcmpgtw mm5, [zero] - paddusw mm2, mm5 - psrlw mm2, 5 - pxor mm2, mm3 ; start restoring sign - psubusw mm3, mm5 - psubw mm2, mm3 ; finish restoring sign - - pxor mm6, mm0 ; mismatch control - movq [edx + 8*eax + 8*16 -2*8 ], mm0 ; data[i] - pxor mm6, mm2 ; mismatch control - movq [edx + 8*eax + 8*16 -2*8 +8], mm2 ; data[i+1] + movq mm4, mm7 ; (matrix*quant) + pmullw mm4, [inter_matrix + 8*eax + 8*16 -2*8] + movq mm5, mm4 + pmulhw mm5, mm0 ; high of c*(matrix*quant) + pmullw mm0, mm4 ; low of c*(matrix*quant) + + movq mm4, mm7 ; (matrix*quant) + pmullw mm4, [inter_matrix + 8*eax + 8*16 -2*8 + 8] + + pcmpgtw mm5, [zero] + paddusw mm0, mm5 + psrlw mm0, 5 + pxor mm0, mm1 ; start restoring sign + psubusw mm1, mm5 + + movq mm5, mm4 + pmulhw mm5, mm2 ; high of c*(matrix*quant) + pmullw mm2, mm4 ; low of c*(matrix*quant) + psubw mm0, mm1 ; finish restoring sign + + pcmpgtw mm5, [zero] + paddusw mm2, mm5 + psrlw mm2, 5 + pxor mm2, mm3 ; start restoring sign + psubusw mm3, mm5 + psubw mm2, mm3 ; finish restoring sign + + pxor mm6, mm0 ; mismatch control + movq [edx + 8*eax + 8*16 -2*8 ], mm0 ; data[i] + pxor mm6, mm2 ; mismatch control + movq [edx + 8*eax + 8*16 -2*8 +8], mm2 ; data[i+1] - jnz near .loop + jnz near .loop ; mismatch control - movq mm0, mm6 - psrlq mm0, 48 - movq mm1, mm6 - movq mm2, mm6 - psrlq mm1, 32 - pxor mm6, mm0 - psrlq mm2, 16 - pxor mm6, mm1 - pxor mm6, mm2 - movd eax, mm6 - and eax, 1 - xor eax, 1 - xor word [edx + 2*63], ax - - ret + movq mm0, mm6 + psrlq mm0, 48 + movq mm1, mm6 + movq mm2, mm6 + psrlq mm1, 32 + pxor mm6, mm0 + psrlq mm2, 16 + pxor mm6, mm1 + pxor mm6, mm2 + movd eax, mm6 + and eax, 1 + xor eax, 1 + xor word [edx + 2*63], ax + xor eax, eax + ret
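
For reference, a scalar sketch of what dequant_mpeg_inter_mmx computes, including the final mismatch control (the parity fix on data[63]). The function name and the matrix parameter are illustrative only; the real code reads the global inter_matrix table and is written in MMX as above.

#include <stdint.h>
#include <stdlib.h>

/* Illustrative scalar model of the MMX routine above, not taken from the
 * XviD C sources: dst = sign(c) * min(2047, ((2*|c| + 1) * matrix * quant) >> 4),
 * with zero coefficients left at zero. */
void dequant_mpeg_inter_ref(int16_t *data, const int16_t *coeff,
                            uint32_t quant, const uint16_t *inter_matrix)
{
    uint32_t sum = 0;
    int i;

    for (i = 0; i < 64; i++) {
        if (coeff[i] == 0) {
            data[i] = 0;
        } else {
            int32_t level = ((2 * abs(coeff[i]) + 1) *
                             (int32_t)inter_matrix[i] * (int32_t)quant) >> 4;
            if (level > 2047)
                level = 2047;                      /* saturate, as the MMX clamp does */
            data[i] = (int16_t)((coeff[i] > 0) ? level : -level);
        }
        sum ^= (uint16_t)data[i];                  /* accumulate output LSB parity */
    }

    /* mismatch control: force the overall parity of the block to be odd */
    if ((sum & 1) == 0)
        data[63] ^= 1;
}

Clamping via the high word of the 16x16 product in the MMX code is equivalent to the explicit 2047 clamp here, since any product of 65536 or more shifts down to at least 2048 after the >>5.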