; *
; * Copyright(C) 2001-2003 Peter Ross <pross@xvid.org>
; *               2002-2003 Pascal Massimino <skal@planet-d.net>
; *               2004      Jean-Marc Bastide <jmtest@voila.fr>
; *
; * This program is free software ; you can redistribute it and/or modify
; * it under the terms of the GNU General Public License as published by
; * the Free Software Foundation ; either version 2 of the License, or
; * (at your option) any later version.
; *
; * This program is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY ; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; * GNU General Public License for more details.
; *
; * You should have received a copy of the GNU General Public License
; * along with this program ; if not, write to the Free Software
; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id: quantize_h263_mmx.asm,v 1.6 2004-08-22 11:46:10 edgomez Exp $
; *
; ****************************************************************************/
|
|
%macro cglobal 1
  %ifdef PREFIX
    %ifdef MARK_FUNCS
      global _%1:function
      %define %1 _%1:function
    %else
      global _%1
      %define %1 _%1
    %endif
  %else
    %ifdef MARK_FUNCS
      global %1:function
    %else
      global %1
    %endif
  %endif
%endmacro

;=============================================================================
; Read only data
;=============================================================================

%ifdef FORMAT_COFF
SECTION .rodata
%else
SECTION .rodata align=16
%endif

ALIGN 16

;-----------------------------------------------------------------------------
;
; quant table
;
;-----------------------------------------------------------------------------

ALIGN 16
mmx_quant:
%assign quant 0
%rep 32
  times 4 dw quant
  %assign quant quant+1
%endrep
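; (each entry is one quant value, 0..31, broadcast to four 16-bit words,
;  so [mmx_quant + quant*8] loads a ready-made packed constant)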

;-----------------------------------------------------------------------------
;
; subtract by Q/2 table
;
;-----------------------------------------------------------------------------

ALIGN 16
mmx_sub:
%assign quant 1
%rep 31
  times 4 dw quant / 2
  %assign quant quant+1
%endrep
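; (tables that start at quant=1 are addressed as [table + quant*8 - 8],
;  as in the mmx_div lookups below)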

;-----------------------------------------------------------------------------
;
; divide by 2Q table
;
; use a shift of 16 to take full advantage of _pmulhw_
; for q=1, _pmulhw_ will overflow so it is treated separately
; (3dnow2 provides _pmulhuw_ which won't cause overflow)
;
;-----------------------------------------------------------------------------

ALIGN 16
mmx_div:
%assign quant 1
%rep 31
  times 4 dw (1<<16) / (quant*2) + 1
  %assign quant quant+1
%endrep
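; Worked example (added): for quant=4 the entry is 65536/8 + 1 = 8193,
; and pmulhw gives (100*8193) >> 16 = 12 = 100/(2*4): a fixed-point
; division by 2*quant.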
;=============================================================================
; Code
;=============================================================================
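
; Added sketch (not in the original source): what the intra quantiser
; below computes, in C-like shorthand (sgn() is illustrative shorthand):
;
;   coeff[0] = (data[0] + sgn(data[0])*(dcscalar/2)) / dcscalar;
;   for (i = 1; i < 64; i++)
;      coeff[i] = data[i] / (2*quant);    /* truncated toward zero */
;
; quant > 1 divides via pmulhw and the mmx_div table; quant == 1 takes
; the .low path, where an arithmetic shift by 1 suffices.
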
ALIGN 16
quant_h263_intra_mmx:

  push esi

  mov esi, [esp + 4 + 8]          ; data
  mov ecx, [esp + 4 + 16]         ; dcscalar
  movsx eax, word [esi]           ; data[0]

  sar ecx, 1                      ; dcscalar/2
  mov edx, eax
  sar edx, 31                     ; sgn(data[0])
  xor ecx, edx                    ; *sgn(data[0])
  sub eax, edx
  add eax, ecx                    ; + (dcscalar/2)*sgn(data[0])

  mov ecx, [esp + 4 + 12]         ; quant
  cdq
  idiv dword [esp + 4 + 16]       ; dcscalar
  cmp ecx, 1
  mov edx, [esp + 4 + 4]          ; coeff
  je .low

  movq mm7, [mmx_div + ecx*8 - 8]
  mov ecx, 4

.loop
  movq mm0, [esi]                 ; data
  pxor mm4, mm4
  movq mm1, [esi + 8]
  pcmpgtw mm4, mm0                ; (data<0)
  pxor mm5, mm5
  pmulhw mm0, mm7                 ; /(2*quant)
  pcmpgtw mm5, mm1
  movq mm2, [esi + 16]
  psubw mm0, mm4                  ; +(data<0)
  pmulhw mm1, mm7
  pxor mm4, mm4
  movq mm3, [esi + 24]
  pcmpgtw mm4, mm2
  psubw mm1, mm5
  pmulhw mm2, mm7
  pxor mm5, mm5
  pcmpgtw mm5, mm3
  pmulhw mm3, mm7
  psubw mm2, mm4
  psubw mm3, mm5
  movq [edx], mm0
  lea esi, [esi + 32]
  movq [edx + 8], mm1
  movq [edx + 16], mm2
  movq [edx + 24], mm3

  dec ecx
  lea edx, [edx + 32]
  jne .loop
  jmp .end

.low
  movd mm7, ecx
  mov ecx, 4
.loop_low
  movq mm0, [esi]
  pxor mm4, mm4
  movq mm1, [esi + 8]
  pcmpgtw mm4, mm0
  pxor mm5, mm5
  psubw mm0, mm4
  pcmpgtw mm5, mm1
  psraw mm0, mm7
  psubw mm1, mm5
  movq mm2, [esi + 16]
  pxor mm4, mm4
  psraw mm1, mm7
  pcmpgtw mm4, mm2
  pxor mm5, mm5
  psubw mm2, mm4
  movq mm3, [esi + 24]
  pcmpgtw mm5, mm3
  psraw mm2, mm7
  psubw mm3, mm5
  movq [edx], mm0
  psraw mm3, mm7
  movq [edx + 8], mm1
  movq [edx + 16], mm2
  lea esi, [esi + 32]
  movq [edx + 24], mm3

  dec ecx
  lea edx, [edx + 32]
  jne .loop_low

.end
  mov edx, [esp + 4 + 4]          ; coeff
  mov [edx], ax
  xor eax, eax                    ; return 0

  pop esi
  ret


;-----------------------------------------------------------------------------
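
; Note (added): SSE2 version of the same intra quantiser: identical math,
; eight coefficients per register, and the +/- dcscalar/2 bias for the DC
; term selected branchlessly with cmovl.
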
quant_h263_intra_sse2:

  push esi

  mov esi, [esp + 4 + 8]          ; data

  movsx eax, word [esi]           ; data[0]

  mov ecx, [esp + 4 + 16]         ; dcscalar
  mov edx, eax
  sar ecx, 1
  add eax, ecx
  sub edx, ecx
  cmovl eax, edx                  ; +/- dcscalar/2
  mov ecx, [esp + 4 + 12]         ; quant
  cdq
  idiv dword [esp + 4 + 16]       ; dcscalar
  cmp ecx, 1
  mov edx, [esp + 4 + 4]          ; coeff
  movq xmm7, [mmx_div + ecx*8 - 8]
  je .low

  mov ecx, 2
  movlhps xmm7, xmm7

.loop
  movdqa xmm0, [esi]
  pxor xmm4, xmm4
  movdqa xmm1, [esi + 16]
  pcmpgtw xmm4, xmm0
  pxor xmm5, xmm5
  pmulhw xmm0, xmm7
  pcmpgtw xmm5, xmm1
  movdqa xmm2, [esi + 32]
  psubw xmm0, xmm4
  pmulhw xmm1, xmm7
  pxor xmm4, xmm4
  movdqa xmm3, [esi + 48]
  pcmpgtw xmm4, xmm2
  psubw xmm1, xmm5
  pmulhw xmm2, xmm7
  pxor xmm5, xmm5
  pcmpgtw xmm5, xmm3
  pmulhw xmm3, xmm7
  psubw xmm2, xmm4
  psubw xmm3, xmm5
  movdqa [edx], xmm0
  lea esi, [esi + 64]
  movdqa [edx + 16], xmm1
  movdqa [edx + 32], xmm2
  movdqa [edx + 48], xmm3

  dec ecx
  lea edx, [edx + 64]
  jne .loop
  jmp .end

.low
  movd xmm7, ecx
  mov ecx, 2
.loop_low
  movdqa xmm0, [esi]
  pxor xmm4, xmm4
  movdqa xmm1, [esi + 16]
  pcmpgtw xmm4, xmm0
  pxor xmm5, xmm5
  psubw xmm0, xmm4
  pcmpgtw xmm5, xmm1
  psraw xmm0, xmm7
  psubw xmm1, xmm5
  movdqa xmm2, [esi + 32]
  pxor xmm4, xmm4
  psraw xmm1, xmm7
  pcmpgtw xmm4, xmm2
  pxor xmm5, xmm5
  psubw xmm2, xmm4
  movdqa xmm3, [esi + 48]
  pcmpgtw xmm5, xmm3
  psraw xmm2, xmm7
  psubw xmm3, xmm5
  movdqa [edx], xmm0
  psraw xmm3, xmm7
  movdqa [edx + 16], xmm1
  movdqa [edx + 32], xmm2
  lea esi, [esi + 64]
  movdqa [edx + 48], xmm3

  dec ecx
  lea edx, [edx + 64]
  jne .loop_low

.end
  mov edx, [esp + 4 + 4]          ; coeff
  mov [edx], ax
  xor eax, eax                    ; return 0

  pop esi
  ret
;-----------------------------------------------------------------------------
;
; uint32_t quant_h263_inter_mmx(int16_t * coeff,
;
;-----------------------------------------------------------------------------
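
; Added sketch (not in the original source): the h263 dequantisers below,
; in C-like shorthand (sgn()/CLAMP() are illustrative shorthand):
;
;   add = (quant & 1) ? quant : quant - 1;
;   data[i] = (coeff[i] == 0) ? 0
;           : CLAMP(2*quant*coeff[i] + sgn(coeff[i])*add, -2048, 2047);
;
; The intra versions additionally write data[0] = coeff[0]*dcscalar,
; clamped to the same range. The mmx versions clamp with saturating
; arithmetic: with s = 32768-2048 (kept in mm5), paddsw/psubsw by s pins
; the high end at 2047 and psubsw/paddsw pins the low end at -2048.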
|
|
|
|
ALIGN 16
dequant_h263_intra_mmx:

  mov ecx, [esp + 12]             ; quant
  mov eax, [esp + 8]              ; coeff
  pcmpeqw mm0, mm0
  movq mm6, [mmx_quant + ecx*8]   ; quant
  shl ecx, 31                     ; quant & 1 ? 0 : -1
  movq mm7, mm6
  movq mm5, mm0
  movd mm1, ecx
  mov edx, [esp + 4]              ; data
  psllw mm0, mm1
  paddw mm7, mm7                  ; 2*quant
  paddw mm6, mm0                  ; quant & 1 ? quant : quant-1
  psllw mm5, 12
  mov ecx, 8
  psrlw mm5, 1

.loop:
  movq mm0, [eax]
  pxor mm2, mm2
  pxor mm4, mm4
  pcmpgtw mm2, mm0
  pcmpeqw mm4, mm0
  pmullw mm0, mm7                 ; * 2 * quant
  movq mm1, [eax + 8]
  psubw mm0, mm2
  pxor mm2, mm6
  pxor mm3, mm3
  pandn mm4, mm2
  pxor mm2, mm2
  pcmpgtw mm3, mm1
  pcmpeqw mm2, mm1
  pmullw mm1, mm7
  paddw mm0, mm4
  psubw mm1, mm3
  pxor mm3, mm6
  pandn mm2, mm3
  paddsw mm0, mm5                 ; saturate
  paddw mm1, mm2

  paddsw mm1, mm5
  psubsw mm0, mm5
  psubsw mm1, mm5
  psubsw mm0, mm5
  psubsw mm1, mm5
  paddsw mm0, mm5
  paddsw mm1, mm5

  movq [edx], mm0
  lea eax, [eax + 16]
  movq [edx + 8], mm1

  dec ecx
  lea edx, [edx + 16]
  jne .loop

  ; deal with DC
  mov eax, [esp + 8]              ; coeff
  movd mm1, [esp + 16]            ; dcscalar
  movd mm0, [eax]                 ; coeff[0]
  pmullw mm0, mm1                 ; * dcscalar
  mov edx, [esp + 4]              ; data
  paddsw mm0, mm5                 ; saturate +
  psubsw mm0, mm5
  psubsw mm0, mm5                 ; saturate -
  paddsw mm0, mm5

  movd eax, mm0
  mov [edx], ax

  xor eax, eax                    ; return 0
  ret

;-----------------------------------------------------------------------------
;
;-----------------------------------------------------------------------------
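
; Note (added): same algorithm as dequant_h263_intra_mmx, but the
; [-2048,2047] clamp uses pminsw/pmaxsw against constants built in
; mm4/mm5, which is shorter than the saturating-add sequence.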
|
ALIGN 16
dequant_h263_intra_xmm:

  mov ecx, [esp + 12]             ; quant
  mov eax, [esp + 8]              ; coeff

  movd mm6, ecx                   ; quant
  pcmpeqw mm0, mm0
  pshufw mm6, mm6, 0              ; all quant
  shl ecx, 31
  movq mm5, mm0
  movq mm7, mm6
  movd mm1, ecx
  mov edx, [esp + 4]              ; data
  psllw mm0, mm1                  ; quant & 1 ? 0 : -1
  movq mm4, mm5
  paddw mm7, mm7                  ; quant*2
  paddw mm6, mm0                  ; quant & 1 ? quant : quant-1
  psrlw mm4, 5                    ; mm4=2047
  mov ecx, 8
  pxor mm5, mm4                   ; mm5=-2048

.loop:
  movq mm0, [eax]
  pxor mm2, mm2
  pxor mm3, mm3
  pcmpgtw mm2, mm0
  pcmpeqw mm3, mm0                ; if coeff==0...
  pmullw mm0, mm7                 ; * 2 * quant
  movq mm1, [eax + 8]
  psubw mm0, mm2
  pxor mm2, mm6
  pandn mm3, mm2                  ; ...then data=0
  pxor mm2, mm2
  paddw mm0, mm3
  pxor mm3, mm3
  pcmpeqw mm2, mm1
  pcmpgtw mm3, mm1
  pmullw mm1, mm7

  pminsw mm0, mm4
  psubw mm1, mm3
  pxor mm3, mm6
  pandn mm2, mm3
  paddw mm1, mm2

  pmaxsw mm0, mm5
  pminsw mm1, mm4
  movq [edx], mm0
  pmaxsw mm1, mm5
  lea eax, [eax + 16]
  movq [edx + 8], mm1

  dec ecx
  lea edx, [edx + 16]
  jne .loop

  ; deal with DC
  mov eax, [esp + 8]              ; coeff
  movd mm1, [esp + 16]            ; dcscalar
  movd mm0, [eax]
  pmullw mm0, mm1
  mov edx, [esp + 4]              ; data
  pminsw mm0, mm4
  pmaxsw mm0, mm5
  movd eax, mm0
  mov [edx], ax

  xor eax, eax                    ; return 0
  ret
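; Note (added): SSE2 variant of dequant_h263_intra; same math on eight
; coefficients per register, with the DC product computed in the integer
; unit (movsx/imul) and then clamped like the rest.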
|
|
ALIGN 16
dequant_h263_intra_sse2:

  mov ecx, [esp + 12]             ; quant
  mov eax, [esp + 8]              ; coeff

  movd xmm6, ecx                  ; quant

  shl ecx, 31
  pshuflw xmm6, xmm6, 0
  pcmpeqw xmm0, xmm0
  movlhps xmm6, xmm6              ; all quant
  movd xmm1, ecx
  movdqa xmm5, xmm0
  movdqa xmm7, xmm6
  mov edx, [esp + 4]              ; data
  paddw xmm7, xmm7                ; quant*2
  psllw xmm0, xmm1                ; quant & 1 ? 0 : -1
  movdqa xmm4, xmm5
  paddw xmm6, xmm0                ; quant & 1 ? quant : quant-1
  psrlw xmm4, 5                   ; 2047
  mov ecx, 4
  pxor xmm5, xmm4                 ; xmm5=-2048

.loop:
  movdqa xmm0, [eax]
  pxor xmm2, xmm2
  pxor xmm3, xmm3
  pcmpgtw xmm2, xmm0
  pcmpeqw xmm3, xmm0
  pmullw xmm0, xmm7               ; * 2 * quant
  movdqa xmm1, [eax + 16]
  psubw xmm0, xmm2
  pxor xmm2, xmm6
  pandn xmm3, xmm2
  pxor xmm2, xmm2
  paddw xmm0, xmm3
  pxor xmm3, xmm3
  pcmpeqw xmm2, xmm1
  pcmpgtw xmm3, xmm1
  pmullw xmm1, xmm7

  pminsw xmm0, xmm4
  psubw xmm1, xmm3
  pxor xmm3, xmm6
  pandn xmm2, xmm3
  paddw xmm1, xmm2

  pmaxsw xmm0, xmm5
  pminsw xmm1, xmm4
  movdqa [edx], xmm0
  pmaxsw xmm1, xmm5
  lea eax, [eax + 32]
  movdqa [edx + 16], xmm1

  dec ecx
  lea edx, [edx + 32]
  jne .loop

  ; deal with DC
  mov eax, [esp + 8]              ; coeff
  movsx eax, word [eax]
  imul dword [esp + 16]           ; dcscalar
  mov edx, [esp + 4]              ; data
  movd xmm0, eax
  pminsw xmm0, xmm4
  pmaxsw xmm0, xmm5
  movd eax, xmm0

  mov [edx], ax

  xor eax, eax                    ; return 0
  ret

;-----------------------------------------------------------------------------
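
; Note (added): dequant_h263_inter_mmx applies the same per-coefficient
; formula as the intra versions but has no special DC handling.
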
ALIGN 16
dequant_h263_inter_mmx:

  mov ecx, [esp + 12]             ; quant
  mov eax, [esp + 8]              ; coeff
  pcmpeqw mm0, mm0
  movq mm6, [mmx_quant + ecx*8]   ; quant
  shl ecx, 31                     ; odd/even
  movq mm7, mm6
  movd mm1, ecx
  mov edx, [esp + 4]              ; data
  movq mm5, mm0
  psllw mm0, mm1                  ; quant & 1 ? 0 : -1
  paddw mm7, mm7                  ; quant*2
  paddw mm6, mm0                  ; quant & 1 ? quant : quant - 1
  psllw mm5, 12
  mov ecx, 8
  psrlw mm5, 1                    ; 32767-2047 (32768-2048)

.loop:
  movq mm0, [eax]
  pxor mm4, mm4
  pxor mm2, mm2
  pcmpeqw mm4, mm0                ; if coeff==0...
  pcmpgtw mm2, mm0
  pmullw mm0, mm7                 ; * 2 * quant
  pxor mm3, mm3
  psubw mm0, mm2
  movq mm1, [eax + 8]
  pxor mm2, mm6
  pcmpgtw mm3, mm1
  pandn mm4, mm2                  ; ...then data==0
  pmullw mm1, mm7
  pxor mm2, mm2
  pcmpeqw mm2, mm1
  psubw mm1, mm3
  pxor mm3, mm6
  pandn mm2, mm3
  paddw mm0, mm4
  paddw mm1, mm2

  paddsw mm0, mm5                 ; saturate
  paddsw mm1, mm5
  psubsw mm0, mm5
  psubsw mm1, mm5
  psubsw mm0, mm5
  psubsw mm1, mm5
  paddsw mm0, mm5
  paddsw mm1, mm5

  movq [edx], mm0
  lea eax, [eax + 16]
  movq [edx + 8], mm1

  dec ecx
  lea edx, [edx + 16]
  jne .loop

  xor eax, eax                    ; return 0
  ret

;-----------------------------------------------------------------------------
;
; uint32_t dequant_h263_inter_xmm(int16_t * data,
;                                 const uint16_t *mpeg_matrices);
;
;-----------------------------------------------------------------------------
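
; Note (added): same formula as dequant_h263_inter_mmx; the clamp to
; [-2048,2047] is done with pminsw/pmaxsw instead of the saturating-add
; sequence.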
|
|
|
|
ALIGN 16
dequant_h263_inter_xmm:

  mov ecx, [esp + 12]             ; quant
  mov eax, [esp + 8]              ; coeff
  pcmpeqw mm0, mm0
  movq mm6, [mmx_quant + ecx*8]   ; quant
  shl ecx, 31
  movq mm5, mm0
  movd mm1, ecx
  movq mm7, mm6
  psllw mm0, mm1
  mov edx, [esp + 4]              ; data
  movq mm4, mm5
  paddw mm7, mm7
  paddw mm6, mm0                  ; quant & 1 ? quant : quant-1

  psrlw mm4, 5                    ; 2047
  mov ecx, 8
  pxor mm5, mm4                   ; mm5=-2048

.loop:
  movq mm0, [eax]
  pxor mm3, mm3
  pxor mm2, mm2
  pcmpeqw mm3, mm0
  pcmpgtw mm2, mm0
  pmullw mm0, mm7                 ; * 2 * quant
  pandn mm3, mm6
  movq mm1, [eax + 8]
  psubw mm0, mm2
  pxor mm2, mm3
  pxor mm3, mm3
  paddw mm0, mm2
  pxor mm2, mm2
  pcmpgtw mm3, mm1
  pcmpeqw mm2, mm1
  pmullw mm1, mm7
  pandn mm2, mm6
  psubw mm1, mm3
  pxor mm3, mm2
  paddw mm1, mm3

  pminsw mm0, mm4
  pminsw mm1, mm4
  pmaxsw mm0, mm5
  pmaxsw mm1, mm5

  movq [edx], mm0
  lea eax, [eax + 16]
  movq [edx + 8], mm1

  dec ecx
  lea edx, [edx + 16]
  jne .loop

  xor eax, eax                    ; return 0
  ret

;-----------------------------------------------------------------------------
;
; uint32_t dequant_h263_inter_sse2(int16_t * data,
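
; Note (added): SSE2 variant; the rounding constant is derived without a
; shift trick as quant - ((quant+1) & 1), i.e. quant when quant is odd
; and quant-1 when even.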

ALIGN 16
dequant_h263_inter_sse2:

  mov ecx, [esp + 12]             ; quant
  mov eax, [esp + 8]              ; coeff

  movq xmm6, [mmx_quant + ecx*8]  ; quant
  inc ecx
  pcmpeqw xmm5, xmm5
  and ecx, 1
  movlhps xmm6, xmm6
  movd xmm0, ecx
  movdqa xmm7, xmm6
  pshuflw xmm0, xmm0, 0
  movdqa xmm4, xmm5
  mov edx, [esp + 4]              ; data
  movlhps xmm0, xmm0
  paddw xmm7, xmm7
  psubw xmm6, xmm0                ; quant & 1 ? quant : quant-1
  psrlw xmm4, 5                   ; 2047
  mov ecx, 4
  pxor xmm5, xmm4                 ; xmm5=-2048

.loop:
  movdqa xmm0, [eax]
  pxor xmm3, xmm3
  pxor xmm2, xmm2
  pcmpeqw xmm3, xmm0
  pcmpgtw xmm2, xmm0
  pmullw xmm0, xmm7               ; * 2 * quant
  pandn xmm3, xmm6
  movdqa xmm1, [eax + 16]
  psubw xmm0, xmm2
  pxor xmm2, xmm3
  pxor xmm3, xmm3
  paddw xmm0, xmm2
  pxor xmm2, xmm2
  pcmpgtw xmm3, xmm1
  pcmpeqw xmm2, xmm1
  pmullw xmm1, xmm7
  pandn xmm2, xmm6
  psubw xmm1, xmm3
  pxor xmm3, xmm2
  paddw xmm1, xmm3

  pminsw xmm0, xmm4
  pminsw xmm1, xmm4
  pmaxsw xmm0, xmm5
  pmaxsw xmm1, xmm5

  movdqa [edx], xmm0
  lea eax, [eax + 32]
  movdqa [edx + 16], xmm1

  dec ecx
  lea edx, [edx + 32]
  jne .loop

  xor eax, eax                    ; return 0
  ret
|
|