--- trunk/xvidcore/src/utils/x86_asm/mem_transfer_3dne.asm	2008/11/14 15:43:28	1794
+++ trunk/xvidcore/src/utils/x86_asm/mem_transfer_3dne.asm	2008/11/26 01:04:34	1795
@@ -19,63 +19,41 @@
 ; * along with this program ; if not, write to the Free Software
 ; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ; *
-; * $Id: mem_transfer_3dne.asm,v 1.10 2008-11-11 20:46:24 Isibaar Exp $
+; * $Id: mem_transfer_3dne.asm,v 1.11 2008-11-26 01:04:34 Isibaar Exp $
 ; *
 ; ***************************************************************************/
 
 ; these 3dne functions are compatible with iSSE, but are optimized specifically
 ; for K7 pipelines
 
-BITS 32
-
-%macro cglobal 1
-  %ifdef PREFIX
-    %ifdef MARK_FUNCS
-      global _%1:function %1.endfunc-%1
-      %define %1 _%1:function %1.endfunc-%1
-      %define ENDFUNC .endfunc
-    %else
-      global _%1
-      %define %1 _%1
-      %define ENDFUNC
-    %endif
-  %else
-    %ifdef MARK_FUNCS
-      global %1:function %1.endfunc-%1
-      %define ENDFUNC .endfunc
-    %else
-      global %1
-      %define ENDFUNC
-    %endif
-  %endif
-%endmacro
+%include "nasm.inc"
 
 ;=============================================================================
 ; Read only data
 ;=============================================================================
 
-%ifdef FORMAT_COFF
-SECTION .rodata
-%else
-SECTION .rodata align=16
-%endif
+DATA
-ALIGN 8
+ALIGN SECTION_ALIGN
 mm_zero:
   dd 0,0
 
 ;=============================================================================
 ; Macros
 ;=============================================================================
 
+%ifdef ARCH_IS_X86_64
+%define nop4
+%else
 %macro nop4 0
   db 08Dh, 074h, 026h, 0
 %endmacro
+%endif
 
 ;=============================================================================
 ; Code
 ;=============================================================================
 
-SECTION .text
+SECTION .rotext align=SECTION_ALIGN
 
 cglobal transfer_8to16copy_3dne
 cglobal transfer_16to8copy_3dne
@@ -94,64 +72,64 @@
 ;
 ;-----------------------------------------------------------------------------
 
-ALIGN 16
+ALIGN SECTION_ALIGN
 transfer_8to16copy_3dne:
 
-  mov eax, [esp+ 8] ; Src
-  mov edx, [esp+12] ; Stride
-  mov ecx, [esp+ 4] ; Dst
-  punpcklbw mm0, [byte eax]
-  punpcklbw mm1, [eax+4]
-  movq mm2, [eax+edx]
-  movq mm3, [eax+edx]
+  mov _EAX, prm2 ; Src
+  mov TMP1, prm3 ; Stride
+  mov TMP0, prm1 ; Dst
+  punpcklbw mm0, [byte _EAX]
+  punpcklbw mm1, [_EAX+4]
+  movq mm2, [_EAX+TMP1]
+  movq mm3, [_EAX+TMP1]
   pxor mm7, mm7
-  lea eax, [eax+2*edx]
+  lea _EAX, [_EAX+2*TMP1]
   punpcklbw mm2, mm7
   punpckhbw mm3, mm7
   psrlw mm0, 8
   psrlw mm1, 8
-  punpcklbw mm4, [eax]
-  punpcklbw mm5, [eax+edx+4]
-  movq [byte ecx+0*64], mm0
-  movq [ecx+0*64+8], mm1
-  punpcklbw mm6, [eax+edx]
-  punpcklbw mm7, [eax+4]
-  lea eax, [byte eax+2*edx]
+  punpcklbw mm4, [_EAX]
+  punpcklbw mm5, [_EAX+TMP1+4]
+  movq [byte TMP0+0*64], mm0
+  movq [TMP0+0*64+8], mm1
+  punpcklbw mm6, [_EAX+TMP1]
+  punpcklbw mm7, [_EAX+4]
+  lea _EAX, [byte _EAX+2*TMP1]
   psrlw mm4, 8
   psrlw mm5, 8
-  punpcklbw mm0, [eax]
-  punpcklbw mm1, [eax+edx+4]
-  movq [ecx+0*64+16], mm2
-  movq [ecx+0*64+24], mm3
+  punpcklbw mm0, [_EAX]
+  punpcklbw mm1, [_EAX+TMP1+4]
+  movq [TMP0+0*64+16], mm2
+  movq [TMP0+0*64+24], mm3
   psrlw mm6, 8
   psrlw mm7, 8
-  punpcklbw mm2, [eax+edx]
-  punpcklbw mm3, [eax+4]
-  lea eax, [byte eax+2*edx]
-  movq [byte ecx+0*64+32], mm4
-  movq [ecx+0*64+56], mm5
+  punpcklbw mm2, [_EAX+TMP1]
+  punpcklbw mm3, [_EAX+4]
+  lea _EAX, [byte _EAX+2*TMP1]
+  movq [byte TMP0+0*64+32], mm4
+  movq [TMP0+0*64+56], mm5
   psrlw mm0, 8
   psrlw mm1, 8
-  punpcklbw mm4, [eax]
-  punpcklbw mm5, [eax+edx+4]
-  movq [byte ecx+0*64+48], mm6
-  movq [ecx+0*64+40], mm7
+  punpcklbw mm4, [_EAX]
+  punpcklbw mm5, [_EAX+TMP1+4]
+  movq [byte TMP0+0*64+48], mm6
+  movq [TMP0+0*64+40], mm7
   psrlw mm2, 8
   psrlw mm3, 8
-  punpcklbw mm6, [eax+edx]
-  punpcklbw mm7, [eax+4]
-  movq [byte ecx+1*64], mm0
-  movq [ecx+1*64+24], mm1
+  punpcklbw mm6, [_EAX+TMP1]
+  punpcklbw mm7, [_EAX+4]
+  movq [byte TMP0+1*64], mm0
+  movq [TMP0+1*64+24], mm1
   psrlw mm4, 8
   psrlw mm5, 8
-  movq [ecx+1*64+16], mm2
-  movq [ecx+1*64+8], mm3
+  movq [TMP0+1*64+16], mm2
+  movq [TMP0+1*64+8], mm3
   psrlw mm6, 8
   psrlw mm7, 8
-  movq [byte ecx+1*64+32], mm4
-  movq [ecx+1*64+56], mm5
-  movq [byte ecx+1*64+48], mm6
-  movq [ecx+1*64+40], mm7
+  movq [byte TMP0+1*64+32], mm4
+  movq [TMP0+1*64+56], mm5
+  movq [byte TMP0+1*64+48], mm6
+  movq [TMP0+1*64+40], mm7
   ret
 ENDFUNC
@@ -164,40 +142,40 @@
 ;
 ;-----------------------------------------------------------------------------
 
-ALIGN 16
+ALIGN SECTION_ALIGN
 transfer_16to8copy_3dne:
 
-  mov eax, [esp+ 8] ; Src
-  mov ecx, [esp+ 4] ; Dst
-  mov edx, [esp+12] ; Stride
-
-  movq mm0, [byte eax+0*32]
-  packuswb mm0, [eax+0*32+8]
-  movq mm1, [eax+0*32+16]
-  packuswb mm1, [eax+0*32+24]
-  movq mm5, [eax+2*32+16]
-  movq mm2, [eax+1*32]
-  packuswb mm2, [eax+1*32+8]
-  movq mm3, [eax+1*32+16]
-  packuswb mm3, [eax+1*32+24]
-  movq mm6, [eax+3*32]
-  movq mm4, [eax+2*32]
-  packuswb mm4, [eax+2*32+8]
-  packuswb mm5, [eax+2*32+24]
-  movq mm7, [eax+3*32+16]
-  packuswb mm7, [eax+3*32+24]
-  packuswb mm6, [eax+3*32+8]
-  movq [ecx], mm0
-  lea eax, [3*edx]
-  add eax, ecx
-  movq [ecx+edx], mm1
-  movq [ecx+2*edx], mm2
-  movq [byte eax], mm3
-  movq [ecx+4*edx], mm4
-  lea ecx, [byte ecx+4*edx]
-  movq [eax+2*edx], mm5
-  movq [eax+4*edx], mm7
-  movq [ecx+2*edx], mm6
+  mov _EAX, prm2 ; Src
+  mov TMP0, prm1 ; Dst
+  mov TMP1, prm3 ; Stride
+
+  movq mm0, [byte _EAX+0*32]
+  packuswb mm0, [_EAX+0*32+8]
+  movq mm1, [_EAX+0*32+16]
+  packuswb mm1, [_EAX+0*32+24]
+  movq mm5, [_EAX+2*32+16]
+  movq mm2, [_EAX+1*32]
+  packuswb mm2, [_EAX+1*32+8]
+  movq mm3, [_EAX+1*32+16]
+  packuswb mm3, [_EAX+1*32+24]
+  movq mm6, [_EAX+3*32]
+  movq mm4, [_EAX+2*32]
+  packuswb mm4, [_EAX+2*32+8]
+  packuswb mm5, [_EAX+2*32+24]
+  movq mm7, [_EAX+3*32+16]
+  packuswb mm7, [_EAX+3*32+24]
+  packuswb mm6, [_EAX+3*32+8]
+  movq [TMP0], mm0
+  lea _EAX, [3*TMP1]
+  add _EAX, TMP0
+  movq [TMP0+TMP1], mm1
+  movq [TMP0+2*TMP1], mm2
+  movq [byte _EAX], mm3
+  movq [TMP0+4*TMP1], mm4
+  lea TMP0, [byte TMP0+4*TMP1]
+  movq [_EAX+2*TMP1], mm5
+  movq [_EAX+4*TMP1], mm7
+  movq [TMP0+2*TMP1], mm6
   ret
 ENDFUNC
@@ -210,82 +188,94 @@
 ;
 ;-----------------------------------------------------------------------------
 
-; when second argument == 1, reference (ebx) block is to current (eax)
+; when second argument == 1, reference (ebx) block is to current (_EAX)
 %macro COPY_8_TO_16_SUB 2
-  movq mm1, [eax] ; cur
+  movq mm1, [_EAX] ; cur
   movq mm0, mm1
-  movq mm4, [ecx] ; ref
+  movq mm4, [TMP0] ; ref
   movq mm6, mm4
 %if %2 == 1
-  movq [eax], mm4
+  movq [_EAX], mm4
 %endif
   punpckhbw mm1, mm7
   punpckhbw mm6, mm7
   punpcklbw mm4, mm7
-ALIGN 8
-  movq mm2, [byte eax+edx]
+ALIGN SECTION_ALIGN
+  movq mm2, [byte _EAX+TMP1]
   punpcklbw mm0, mm7
-  movq mm3, [byte eax+edx]
+  movq mm3, [byte _EAX+TMP1]
   punpcklbw mm2, mm7
-  movq mm5, [byte ecx+edx] ; ref
+  movq mm5, [byte TMP0+TMP1] ; ref
   punpckhbw mm3, mm7
 %if %2 == 1
-  movq [byte eax+edx], mm5
+  movq [byte _EAX+TMP1], mm5
 %endif
   psubsw mm1, mm6
   movq mm6, mm5
   psubsw mm0, mm4
 %if (%1 < 3)
-  lea eax,[eax+2*edx]
-  lea ecx,[ecx+2*edx]
+  lea _EAX,[_EAX+2*TMP1]
+  lea TMP0,[TMP0+2*TMP1]
 %else
-  mov ecx,[esp]
-  add esp,byte 4
+  mov TMP0,[_ESP]
+  add _ESP,byte PTR_SIZE
 %endif
-  movq [edi+%1*32+ 8], mm1
-  movq [byte edi+%1*32+ 0], mm0 ; dst
+  movq [_EDI+%1*32+ 8], mm1
+  movq [byte _EDI+%1*32+ 0], mm0 ; dst
   punpcklbw mm5, mm7
   punpckhbw mm6, mm7
   psubsw mm2, mm5
   psubsw mm3, mm6
-  movq [edi+%1*32+16], mm2
-  movq [edi+%1*32+24], mm3
+  movq [_EDI+%1*32+16], mm2
+  movq [_EDI+%1*32+24], mm3
 %endmacro
 
-ALIGN 16
+ALIGN SECTION_ALIGN
 transfer_8to16sub_3dne:
-  mov eax, [esp + 8] ; Cur
-  mov ecx, [esp +12] ; Ref
-  push edi
-  mov edx, [esp+4+16] ; Stride
-  mov edi, [esp+4+ 4] ; Dst
+  mov _EAX, prm2 ; Cur
+  mov TMP0, prm3 ; Ref
+  mov TMP1, prm4 ; Stride
+
+  push _EDI
+%ifdef ARCH_IS_X86_64
+  mov _EDI, prm1
+%else
+  mov _EDI, [_ESP+4+4] ; Dst
+%endif
+
   pxor mm7, mm7
   nop
-ALIGN 4
+ALIGN SECTION_ALIGN
   COPY_8_TO_16_SUB 0, 1
   COPY_8_TO_16_SUB 1, 1
   COPY_8_TO_16_SUB 2, 1
   COPY_8_TO_16_SUB 3, 1
-  mov edi, ecx
+  mov _EDI, TMP0
   ret
 ENDFUNC
 
-ALIGN 16
+ALIGN SECTION_ALIGN
 transfer_8to16subro_3dne:
-  mov eax, [esp + 8] ; Cur
-  mov ecx, [esp +12] ; Ref
-  push edi
-  mov edx, [esp+4+16] ; Stride
-  mov edi, [esp+4+ 4] ; Dst
+  mov _EAX, prm2 ; Cur
+  mov TMP0, prm3 ; Ref
+  mov TMP1, prm4 ; Stride
+
+  push _EDI
%ifdef ARCH_IS_X86_64
+  mov _EDI, prm1
+%else
+  mov _EDI, [_ESP+4+ 4] ; Dst
+%endif
+
   pxor mm7, mm7
   nop
-ALIGN 4
+ALIGN SECTION_ALIGN
   COPY_8_TO_16_SUB 0, 0
   COPY_8_TO_16_SUB 1, 0
   COPY_8_TO_16_SUB 2, 0
   COPY_8_TO_16_SUB 3, 0
-  mov edi, ecx
+  mov _EDI, TMP0
   ret
 ENDFUNC
@@ -301,32 +291,32 @@
 ;-----------------------------------------------------------------------------
 
 %macro COPY_8_TO_16_SUB2_SSE 1
-  db 0Fh, 6Fh, 44h, 20h, 00 ;movq mm0, [byte eax] ; cur
+  db 0Fh, 6Fh, 44h, 20h, 00 ;movq mm0, [byte _EAX] ; cur
   punpcklbw mm0, mm7
-  movq mm2, [byte eax+edx]
+  movq mm2, [byte _EAX+TMP1]
   punpcklbw mm2, mm7
-  db 0Fh, 6Fh, 4ch, 20h, 00 ;movq mm1, [byte eax]
+  db 0Fh, 6Fh, 4ch, 20h, 00 ;movq mm1, [byte _EAX]
   punpckhbw mm1, mm7
-  movq mm3, [byte eax+edx]
+  movq mm3, [byte _EAX+TMP1]
   punpckhbw mm3, mm7
-  movq mm4, [byte ebx] ; ref1
-  pavgb mm4, [byte esi] ; ref2
-  movq [eax], mm4
-  movq mm5, [ebx+edx] ; ref
-  pavgb mm5, [esi+edx] ; ref2
-  movq [eax+edx], mm5
+  movq mm4, [byte _EBX] ; ref1
+  pavgb mm4, [byte _ESI] ; ref2
+  movq [_EAX], mm4
+  movq mm5, [_EBX+TMP1] ; ref
+  pavgb mm5, [_ESI+TMP1] ; ref2
+  movq [_EAX+TMP1], mm5
   movq mm6, mm4
   punpcklbw mm4, mm7
   punpckhbw mm6, mm7
 %if (%1 < 3)
-  lea esi,[esi+2*edx]
-  lea ebx,[byte ebx+2*edx]
-  lea eax,[eax+2*edx]
+  lea _ESI,[_ESI+2*TMP1]
+  lea _EBX,[byte _EBX+2*TMP1]
+  lea _EAX,[_EAX+2*TMP1]
 %else
-  mov esi,[esp]
-  mov ebx,[esp+4]
-  add esp,byte 8
+  mov _ESI,[_ESP]
+  mov _EBX,[_ESP+PTR_SIZE]
+  add _ESP,byte 2*PTR_SIZE
 %endif
   psubsw mm0, mm4
   psubsw mm1, mm6
@@ -335,23 +325,35 @@
   punpckhbw mm6, mm7
   psubsw mm2, mm5
   psubsw mm3, mm6
-  movq [byte ecx+%1*32+ 0], mm0 ; dst
-  movq [ecx+%1*32+ 8], mm1
-  movq [ecx+%1*32+16], mm2
-  movq [ecx+%1*32+24], mm3
+  movq [byte TMP0+%1*32+ 0], mm0 ; dst
+  movq [TMP0+%1*32+ 8], mm1
+  movq [TMP0+%1*32+16], mm2
+  movq [TMP0+%1*32+24], mm3
 %endmacro
 
-ALIGN 16
+ALIGN SECTION_ALIGN
 transfer_8to16sub2_3dne:
-  mov edx, [esp +20] ; Stride
-  mov ecx, [esp + 4] ; Dst
-  mov eax, [esp + 8] ; Cur
-  push ebx
-  lea ebp,[byte ebp]
-  mov ebx, [esp+4+12] ; Ref1
-  push esi
+  mov TMP1d, prm5d ; Stride
+  mov TMP0, prm1 ; Dst
+  mov _EAX, prm2 ; Cur
+  push _EBX
+  lea _EBP,[byte _EBP]
+
+%ifdef ARCH_IS_X86_64
+  mov _EBX, prm3
+%else
+  mov _EBX, [_ESP+4+12] ; Ref1
+%endif
+
+  push _ESI
   pxor mm7, mm7
-  mov esi, [esp+8+16] ; Ref2
+
+%ifdef ARCH_IS_X86_64
+  mov _ESI, prm4
+%else
+  mov _ESI, [_ESP+8+16] ; Ref2
+%endif
+
   nop4
   COPY_8_TO_16_SUB2_SSE 0
   COPY_8_TO_16_SUB2_SSE 1
@@ -371,40 +373,40 @@
 ;-----------------------------------------------------------------------------
 
 %macro COPY_16_TO_8_ADD 1
-  db 0Fh, 6Fh, 44h, 21h, 00 ;movq mm0, [byte ecx]
+  movq mm0, [byte TMP0]
   punpcklbw mm0, mm7
-  movq mm2, [byte ecx+edx]
+  movq mm2, [byte TMP0+TMP1]
   punpcklbw mm2, mm7
-  db 0Fh, 6Fh, 4ch, 21h, 00 ;movq mm1, [byte ecx]
+  movq mm1, [byte TMP0]
   punpckhbw mm1, mm7
-  movq mm3, [byte ecx+edx]
+  movq mm3, [byte TMP0+TMP1]
   punpckhbw mm3, mm7
-  paddsw mm0, [byte eax+%1*32+ 0]
-  paddsw mm1, [eax+%1*32+ 8]
-  paddsw mm2, [eax+%1*32+16]
-  paddsw mm3, [eax+%1*32+24]
+  paddsw mm0, [byte _EAX+%1*32+ 0]
+  paddsw mm1, [_EAX+%1*32+ 8]
+  paddsw mm2, [_EAX+%1*32+16]
+  paddsw mm3, [_EAX+%1*32+24]
   packuswb mm0, mm1
   packuswb mm2, mm3
-  mov esp,esp
-  movq [byte ecx], mm0
-  movq [ecx+edx], mm2
+  mov _ESP, _ESP
+  movq [byte TMP0], mm0
+  movq [TMP0+TMP1], mm2
 %endmacro
 
-ALIGN 16
+ALIGN SECTION_ALIGN
 transfer_16to8add_3dne:
-  mov ecx, [esp+ 4] ; Dst
-  mov edx, [esp+12] ; Stride
-  mov eax, [esp+ 8] ; Src
+  mov TMP0, prm1 ; Dst
+  mov TMP1, prm3 ; Stride
+  mov _EAX, prm2 ; Src
   pxor mm7, mm7
   nop
 
   COPY_16_TO_8_ADD 0
-  lea ecx,[byte ecx+2*edx]
+  lea TMP0,[byte TMP0+2*TMP1]
   COPY_16_TO_8_ADD 1
-  lea ecx,[byte ecx+2*edx]
+  lea TMP0,[byte TMP0+2*TMP1]
   COPY_16_TO_8_ADD 2
-  lea ecx,[byte ecx+2*edx]
+  lea TMP0,[byte TMP0+2*TMP1]
   COPY_16_TO_8_ADD 3
   ret
 ENDFUNC
@@ -419,25 +421,25 @@
 ;-----------------------------------------------------------------------------
 
 %macro COPY_8_TO_8 0
-  movq mm0, [byte eax]
-  movq mm1, [eax+edx]
-  movq [byte ecx], mm0
-  lea eax,[byte eax+2*edx]
-  movq [ecx+edx], mm1
+  movq mm0, [byte _EAX]
+  movq mm1, [_EAX+TMP1]
+  movq [byte TMP0], mm0
+  lea _EAX,[byte _EAX+2*TMP1]
+  movq [TMP0+TMP1], mm1
 %endmacro
 
-ALIGN 16
+ALIGN SECTION_ALIGN
 transfer8x8_copy_3dne:
-  mov eax, [esp+ 8] ; Src
-  mov edx, [esp+12] ; Stride
-  mov ecx, [esp+ 4] ; Dst
+  mov _EAX, prm2 ; Src
+  mov TMP1, prm3 ; Stride
+  mov TMP0, prm1 ; Dst
 
   COPY_8_TO_8
-  lea ecx,[byte ecx+2*edx]
+  lea TMP0,[byte TMP0+2*TMP1]
   COPY_8_TO_8
-  lea ecx,[byte ecx+2*edx]
+  lea TMP0,[byte TMP0+2*TMP1]
   COPY_8_TO_8
-  lea ecx,[byte ecx+2*edx]
+  lea TMP0,[byte TMP0+2*TMP1]
   COPY_8_TO_8
   ret
 ENDFUNC
@@ -451,14 +453,14 @@
 ;
 ;-----------------------------------------------------------------------------
 
-ALIGN 16
+ALIGN SECTION_ALIGN
 transfer8x4_copy_3dne:
-  mov eax, [esp+ 8] ; Src
-  mov edx, [esp+12] ; Stride
-  mov ecx, [esp+ 4] ; Dst
+  mov _EAX, prm2 ; Src
+  mov TMP1, prm3 ; Stride
+  mov TMP0, prm1 ; Dst
 
   COPY_8_TO_8
-  lea ecx,[byte ecx+2*edx]
+  lea TMP0,[byte TMP0+2*TMP1]
   COPY_8_TO_8
   ret
 ENDFUNC
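
The whole patch follows one pattern: BITS 32, the file-local cglobal macro, and the hard-coded ia32 registers and [esp+...] argument loads are replaced by the shared abstractions from nasm.inc, so the same source now assembles for both ia32 and x86_64. _EAX/_EBX/_ESI/_EDI/_EBP/_ESP name the natural-width register of the target, TMP0/TMP1 name two scratch registers (on ia32 builds they must expand back to ecx and edx, since the diff renames those one-for-one), prmN names the Nth function argument (a stack slot under 32-bit cdecl, a register under the x86_64 conventions), a trailing d as in TMP1d/prm5d selects the 32-bit view for int-sized values such as Stride, and PTR_SIZE is the width of one stack slot. Below is a minimal sketch of such a layer; the x86_64 register assignments are illustrative placeholders, not the definitions from the real nasm.inc shipped with xvidcore, which also covers Win64 argument passing, PREFIX/MARK_FUNCS symbol decoration, and the DATA, SECTION_ALIGN, cglobal, and ENDFUNC machinery used above.

    ; Illustrative stand-in for nasm.inc -- placeholder register choices,
    ; NOT the real definitions shipped with xvidcore.
    %ifdef ARCH_IS_X86_64
    BITS 64
    %define _EAX   rax             ; _Exx: natural-width view of the ia32 name
    %define _EBX   rbx
    %define _ESI   rsi
    %define _EDI   rdi
    %define _EBP   rbp
    %define _ESP   rsp
    %define TMP0   r10             ; scratch; chosen here so it cannot alias
    %define TMP1   r11             ; an argument register before args are read
    %define TMP1d  r11d            ; trailing d: 32-bit view for int values
    %define prm1   rdi             ; System V AMD64: args arrive in registers
    %define prm2   rsi
    %define prm3   rdx
    %define prm4   rcx
    %define prm5d  r8d
    %define PTR_SIZE 8             ; one pointer / one stack slot
    %else
    BITS 32
    %define _EAX   eax
    %define _EBX   ebx
    %define _ESI   esi
    %define _EDI   edi
    %define _EBP   ebp
    %define _ESP   esp
    %define TMP0   ecx             ; matches the old code's ecx usage
    %define TMP1   edx             ; matches the old code's edx usage
    %define TMP1d  edx
    %define prm1   dword [esp+ 4]  ; cdecl: args live above the return address
    %define prm2   dword [esp+ 8]
    %define prm3   dword [esp+12]
    %define prm4   dword [esp+16]
    %define prm5d  dword [esp+20]
    %define PTR_SIZE 4
    %endif

A few details are visible in the diff itself. In transfer_8to16sub_3dne and its relatives, the 32-bit path still loads Dst with an explicit mov _EDI, [_ESP+4+4] instead of prm1, because the preceding push _EDI has moved the stack pointer and the stack-relative prmN offsets would be off by one slot; on x86_64 prm1 is a register and remains valid, hence the %ifdef ARCH_IS_X86_64 split. In COPY_16_TO_8_ADD, the hand-assembled db 0Fh, 6Fh, 44h, 21h, 00 forms (movq with the base register fixed to ecx in the ModRM/SIB bytes) give way to plain movq mm0, [byte TMP0] mnemonics, since a hard-coded encoding cannot follow the register renaming. And nop4 becomes an empty define on x86_64, presumably because its bytes (lea esi, [esi+0], a four-byte filler used to pad K7 decode groups) stop being side-effect free in 64-bit mode, where a write to a 32-bit destination zeroes the upper half of rsi.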