; * along with this program ; if not, write to the Free Software
; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id: quantize_h263_mmx.asm,v 1.11.2.2 2008-12-02 14:00:09 Isibaar Exp $
; *
; ****************************************************************************/

; enable dequant saturate [-2048,2047], test purposes only.
%define SATURATE

%include "nasm.inc"

;=============================================================================
; Read only Local data
;=============================================================================

DATA

ALIGN SECTION_ALIGN
plus_one:
  times 8 dw 1
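
; The tables below are indexed by quant (1..31), one entry per level.
; As the code below uses them: mmx_div is loaded as a per-quant
; "divider" for pmulhw, standing in for the division coeff/(2*quant);
; mmx_sub supplies the matching rounding term; mmx_quant holds the
; quant value itself. plus_one above (eight words of 1) is the
; pmaddwd operand that folds packed word sums into dword sums.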

;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
mmx_quant:
%assign quant 0
%rep 32
;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
mmx_sub:
%assign quant 1
%rep 31
;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
mmx_div:
%assign quant 1
%rep 31
; Code
;=============================================================================

SECTION .rotext align=SECTION_ALIGN

cglobal quant_h263_intra_mmx
cglobal quant_h263_intra_sse2
;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
quant_h263_intra_mmx:
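
  ; Intra quantization. Roughly the scalar logic, as a sketch (the
  ; SIMD below handles four words per MMX register):
  ;   dc    = (data[0] + sgn(data[0])*(dcscalar/2)) / dcscalar
  ;   ac[i] = data[i] / (2*quant)   ; pmulhw with the mmx_div entry
  ; For quant==1 the .low path uses an arithmetic shift instead; the
  ; pcmpgtw/psubw pairs correct negative inputs so the division
  ; rounds toward zero.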

  mov _EAX, prm2 ; data
  mov TMP0, prm4 ; dcscalar
  movsx _EAX, word [_EAX] ; data[0]

  sar TMP0, 1 ; dcscalar /2
  mov TMP1, _EAX
  sar TMP1, 31 ; sgn(data[0])
  xor TMP0,TMP1 ; *sgn(data[0])
  sub _EAX,TMP1
  add _EAX,TMP0 ; + (dcscalar/2)*sgn(data[0])

  mov TMP0, prm3 ; quant
  lea TMP1, [mmx_div]
  movq mm7, [TMP1+TMP0 * 8 - 8]
%ifdef ARCH_IS_X86_64
%ifdef WINDOWS
  mov TMP1, prm2
%endif
%endif
  cdq
  idiv prm4d ; dcscalar
%ifdef ARCH_IS_X86_64
%ifdef WINDOWS
  mov prm2, TMP1
%endif
%endif
  cmp TMP0, 1
  mov TMP1, prm1 ; coeff
  je .low

  mov TMP0, prm2 ; data
  push _EAX ; DC
  mov _EAX, TMP0

  mov TMP0,4

.loop:
  movq mm0, [_EAX] ; data
  pxor mm4,mm4
  movq mm1, [_EAX + 8]
  pcmpgtw mm4,mm0 ; (data<0)
  pxor mm5,mm5
  pmulhw mm0,mm7 ; /(2*quant)
  pcmpgtw mm5,mm1
  movq mm2, [_EAX+16]
  psubw mm0,mm4 ; +(data<0)
  pmulhw mm1,mm7
  pxor mm4,mm4
  movq mm3,[_EAX+24]
  pcmpgtw mm4,mm2
  psubw mm1,mm5
  pmulhw mm2,mm7
  pmulhw mm3,mm7
  psubw mm2,mm4
  psubw mm3,mm5
  movq [TMP1], mm0
  lea _EAX, [_EAX+32]
  movq [TMP1 + 8], mm1
  movq [TMP1 + 16], mm2
  movq [TMP1 + 24], mm3

  dec TMP0
  lea TMP1, [TMP1+32]
  jne .loop
  jmp .end

.low:
  movd mm7,TMP0d

  mov TMP0, prm2
  push _EAX
  mov _EAX, TMP0

  mov TMP0,4
.loop_low:
  movq mm0, [_EAX]
  pxor mm4,mm4
  movq mm1, [_EAX + 8]
  pcmpgtw mm4,mm0
  pxor mm5,mm5
  psubw mm0,mm4
  pcmpgtw mm5,mm1
  psraw mm0,mm7
  psubw mm1,mm5
  movq mm2,[_EAX+16]
  pxor mm4,mm4
  psraw mm1,mm7
  pcmpgtw mm4,mm2
  pxor mm5,mm5
  psubw mm2,mm4
  movq mm3,[_EAX+24]
  pcmpgtw mm5,mm3
  psraw mm2,mm7
  psubw mm3,mm5
  movq [TMP1], mm0
  psraw mm3,mm7
  movq [TMP1 + 8], mm1
  movq [TMP1+16],mm2
  lea _EAX, [_EAX+32]
  movq [TMP1+24],mm3

  dec TMP0
  lea TMP1, [TMP1+32]
  jne .loop_low

.end:

  pop _EAX

  mov TMP1, prm1 ; coeff
  mov [TMP1],ax
  xor _EAX,_EAX ; return 0

  ret
ENDFUNC


;-----------------------------------------------------------------------------
;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
quant_h263_intra_sse2:
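
  ; Same algorithm as quant_h263_intra_mmx above, processing eight
  ; words per xmm register; PUSH_XMM6_XMM7/POP_XMM6_XMM7 preserve
  ; xmm6/xmm7 where the ABI requires it.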
  PUSH_XMM6_XMM7
  mov _EAX, prm2 ; data

  movsx _EAX, word [_EAX] ; data[0]

  mov TMP0,prm4 ; dcscalar
  mov TMP1,_EAX
  sar TMP0,1
  add _EAX,TMP0
  sub TMP1,TMP0
  cmovl _EAX,TMP1 ; +/- dcscalar/2
  mov TMP0, prm3 ; quant
  lea TMP1, [mmx_div]
  movq xmm7, [TMP1+TMP0 * 8 - 8]

%ifdef ARCH_IS_X86_64
%ifdef WINDOWS
  mov TMP1, prm2
%endif
%endif
  cdq
  idiv prm4d ; dcscalar
%ifdef ARCH_IS_X86_64
%ifdef WINDOWS
  mov prm2, TMP1
%endif
%endif
  cmp TMP0, 1
  mov TMP1, prm1 ; coeff
  je near .low

  mov TMP0, prm2
  push _EAX ; DC
  mov _EAX, TMP0

  mov TMP0,2
  movlhps xmm7,xmm7

.loop:
  movdqa xmm0, [_EAX]
  pxor xmm4,xmm4
  movdqa xmm1, [_EAX + 16]
  pcmpgtw xmm4,xmm0
  pxor xmm5,xmm5
  pmulhw xmm0,xmm7
  pcmpgtw xmm5,xmm1
  movdqa xmm2, [_EAX+32]
  psubw xmm0,xmm4
  pmulhw xmm1,xmm7
  pxor xmm4,xmm4
  movdqa xmm3,[_EAX+48]
  pcmpgtw xmm4,xmm2
  psubw xmm1,xmm5
  pmulhw xmm2,xmm7
  pmulhw xmm3,xmm7
  psubw xmm2,xmm4
  psubw xmm3,xmm5
  movdqa [TMP1], xmm0
  lea _EAX, [_EAX+64]
  movdqa [TMP1 + 16], xmm1
  movdqa [TMP1 + 32], xmm2
  movdqa [TMP1 + 48], xmm3

  dec TMP0
  lea TMP1, [TMP1+64]
  jne .loop
  jmp .end

.low:
  movd xmm7,TMP0d

  mov TMP0, prm2
  push _EAX ; DC
  mov _EAX, TMP0

  mov TMP0,2
.loop_low:
  movdqa xmm0, [_EAX]
  pxor xmm4,xmm4
  movdqa xmm1, [_EAX + 16]
  pcmpgtw xmm4,xmm0
  pxor xmm5,xmm5
  psubw xmm0,xmm4
  pcmpgtw xmm5,xmm1
  psraw xmm0,xmm7
  psubw xmm1,xmm5
  movdqa xmm2,[_EAX+32]
  pxor xmm4,xmm4
  psraw xmm1,xmm7
  pcmpgtw xmm4,xmm2
  pxor xmm5,xmm5
  psubw xmm2,xmm4
  movdqa xmm3,[_EAX+48]
  pcmpgtw xmm5,xmm3
  psraw xmm2,xmm7
  psubw xmm3,xmm5
  movdqa [TMP1], xmm0
  psraw xmm3,xmm7
  movdqa [TMP1+16], xmm1
  movdqa [TMP1+32],xmm2
  lea _EAX, [_EAX+64]
  movdqa [TMP1+48],xmm3

  dec TMP0
  lea TMP1, [TMP1+64]
  jne .loop_low

.end:

  pop _EAX

  mov TMP1, prm1 ; coeff
  mov [TMP1],ax
  xor _EAX,_EAX ; return 0
  POP_XMM6_XMM7
  ret
ENDFUNC


;-----------------------------------------------------------------------------
;
;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
quant_h263_inter_mmx:
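
  ; Inter quantization: no DC special case. Each coefficient is
  ; divided by 2*quant via the pmulhw "divider" (the .q1routine path
  ; handles quant==1); mm5 holds the running sum of quantized levels,
  ; folded by the pmaddwd/psrlq/paddd tail at .done and returned in
  ; eax.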

  mov TMP1, prm1 ; coeff
  mov _EAX, prm3 ; quant

  pxor mm5, mm5 ; sum
  lea TMP0, [mmx_sub]
  movq mm6, [TMP0 + _EAX * 8 - 8] ; sub

  cmp al, 1
  jz near .q1routine

  lea TMP0, [mmx_div]
  movq mm7, [TMP0 + _EAX * 8 - 8] ; divider

  xor TMP0, TMP0
  mov _EAX, prm2 ; data

ALIGN SECTION_ALIGN
.loop:
  movq mm0, [_EAX + 8*TMP0] ; mm0 = [1st]
  movq mm3, [_EAX + 8*TMP0 + 8]
  pxor mm1, mm1 ; mm1 = 0
  pxor mm4, mm4 ;
  pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
  pxor mm3, mm4 ;
  psubw mm0, mm1 ; undisplace
  psubw mm3, mm4
  movq [TMP1 + 8*TMP0], mm0
  movq [TMP1 + 8*TMP0 + 8], mm3

  add TMP0, 2
  cmp TMP0, 16
  jnz .loop

.done:
  pmaddwd mm5, [plus_one]
  movq mm0, mm5
  psrlq mm5, 32
  paddd mm0, mm5

  movd eax, mm0 ; return sum

  ret

.q1routine:
  xor TMP0, TMP0
  mov _EAX, prm2 ; data

ALIGN SECTION_ALIGN
.q1loop:
  movq mm0, [_EAX + 8*TMP0] ; mm0 = [1st]
  movq mm3, [_EAX + 8*TMP0+ 8] ;
  pxor mm1, mm1 ; mm1 = 0
  pxor mm4, mm4 ;
  pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
  pxor mm3, mm4 ;
  psubw mm0, mm1 ; undisplace
  psubw mm3, mm4
  movq [TMP1 + 8*TMP0], mm0
  movq [TMP1 + 8*TMP0 + 8], mm3

  add TMP0, 2
  cmp TMP0, 16
  jnz .q1loop

  jmp .done
ENDFUNC


;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
quant_h263_inter_sse2:
|
PUSH_XMM6_XMM7 |
487 |
|
|
488 |
push esi |
mov TMP1, prm1 ; coeff |
489 |
push edi |
mov _EAX, prm3 ; quant |
|
|
|
|
mov edi, [esp + 8 + 4] ; coeff |
|
|
mov esi, [esp + 8 + 8] ; data |
|
|
mov eax, [esp + 8 + 12] ; quant |
|
|
|
|
|
xor ecx, ecx |
|
490 |
|
|
491 |
pxor xmm5, xmm5 ; sum |
pxor xmm5, xmm5 ; sum |
492 |
|
|
493 |
movq mm0, [mmx_sub + eax*8 - 8] ; sub |
lea TMP0, [mmx_sub] |
494 |
|
movq mm0, [TMP0 + _EAX*8 - 8] ; sub |
495 |
movq2dq xmm6, mm0 ; load into low 8 bytes |
movq2dq xmm6, mm0 ; load into low 8 bytes |
496 |
movlhps xmm6, xmm6 ; duplicate into high 8 bytes |
movlhps xmm6, xmm6 ; duplicate into high 8 bytes |
497 |
|
|
498 |
cmp al, 1 |
cmp al, 1 |
499 |
jz near .qes2_q1loop |
jz near .qes2_q1_routine |
500 |
|
|
501 |
|
.qes2_not1: |
502 |
|
lea TMP0, [mmx_div] |
503 |
|
movq mm0, [TMP0 + _EAX*8 - 8] ; divider |
504 |
|
|
505 |
|
xor TMP0, TMP0 |
506 |
|
mov _EAX, prm2 ; data |
507 |
|
|
|
.qes2_not1 |
|
|
movq mm0, [mmx_div + eax*8 - 8] ; divider |
|
508 |
movq2dq xmm7, mm0 |
movq2dq xmm7, mm0 |
509 |
movlhps xmm7, xmm7 |
movlhps xmm7, xmm7 |
510 |
|
|
511 |
ALIGN 16 |
ALIGN SECTION_ALIGN |
512 |
.qes2_loop |
.qes2_loop: |
513 |
movdqa xmm0, [esi + ecx*8] ; xmm0 = [1st] |
movdqa xmm0, [_EAX + TMP0*8] ; xmm0 = [1st] |
514 |
movdqa xmm3, [esi + ecx*8 + 16] ; xmm3 = [2nd] |
movdqa xmm3, [_EAX + TMP0*8 + 16] ; xmm3 = [2nd] |
515 |
pxor xmm1, xmm1 |
pxor xmm1, xmm1 |
516 |
pxor xmm4, xmm4 |
pxor xmm4, xmm4 |
517 |
pcmpgtw xmm1, xmm0 |
pcmpgtw xmm1, xmm0 |
530 |
pxor xmm3, xmm4 |
pxor xmm3, xmm4 |
531 |
psubw xmm0, xmm1 |
psubw xmm0, xmm1 |
532 |
psubw xmm3, xmm4 |
psubw xmm3, xmm4 |
533 |
movdqa [edi + ecx*8], xmm0 |
movdqa [TMP1 + TMP0*8], xmm0 |
534 |
movdqa [edi + ecx*8 + 16], xmm3 |
movdqa [TMP1 + TMP0*8 + 16], xmm3 |
535 |
|
|
536 |
add ecx, 4 |
add TMP0, 4 |
537 |
cmp ecx, 16 |
cmp TMP0, 16 |
538 |
jnz .qes2_loop |
jnz .qes2_loop |
539 |
|
|
540 |
.qes2_done |
.qes2_done: |
541 |
movdqu xmm6, [plus_one] |
movdqu xmm6, [plus_one] |
542 |
pmaddwd xmm5, xmm6 |
pmaddwd xmm5, xmm6 |
543 |
movhlps xmm6, xmm5 |
movhlps xmm6, xmm5 |
550 |
|
|
551 |
movd eax, mm0 ; return sum |
movd eax, mm0 ; return sum |
552 |
|
|
553 |
pop edi |
POP_XMM6_XMM7 |
|
pop esi |
|
|
|
|
554 |
ret |
ret |
555 |
|
|
556 |
ALIGN 16 |
.qes2_q1_routine: |
557 |
.qes2_q1loop |
xor TMP0, TMP0 |
558 |
movdqa xmm0, [esi + ecx*8] ; xmm0 = [1st] |
mov _EAX, prm2 ; data |
559 |
movdqa xmm3, [esi + ecx*8 + 16] ; xmm3 = [2nd] |
|
560 |
|
ALIGN SECTION_ALIGN |
561 |
|
.qes2_q1loop: |
562 |
|
movdqa xmm0, [_EAX + TMP0*8] ; xmm0 = [1st] |
563 |
|
movdqa xmm3, [_EAX + TMP0*8 + 16] ; xmm3 = [2nd] |
564 |
pxor xmm1, xmm1 |
pxor xmm1, xmm1 |
565 |
pxor xmm4, xmm4 |
pxor xmm4, xmm4 |
566 |
pcmpgtw xmm1, xmm0 |
pcmpgtw xmm1, xmm0 |
579 |
pxor xmm3, xmm4 |
pxor xmm3, xmm4 |
580 |
psubw xmm0, xmm1 |
psubw xmm0, xmm1 |
581 |
psubw xmm3, xmm4 |
psubw xmm3, xmm4 |
582 |
movdqa [edi + ecx*8], xmm0 |
movdqa [TMP1 + TMP0*8], xmm0 |
583 |
movdqa [edi + ecx*8 + 16], xmm3 |
movdqa [TMP1 + TMP0*8 + 16], xmm3 |
584 |
|
|
585 |
add ecx, 4 |
add TMP0, 4 |
586 |
cmp ecx, 16 |
cmp TMP0, 16 |
587 |
jnz .qes2_q1loop |
jnz .qes2_q1loop |
588 |
jmp .qes2_done |
jmp .qes2_done |
589 |
|
ENDFUNC |
590 |
|
|
591 |
|
|
592 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
599 |
; |
; |
600 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
601 |
|
|
602 |
ALIGN 16 |
ALIGN SECTION_ALIGN |
603 |
dequant_h263_intra_mmx: |
dequant_h263_intra_mmx: |
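
  ; Intra dequantization. Roughly, per AC coefficient (scalar sketch):
  ;   coeff == 0  ->  0
  ;   else data = sgn(coeff) * (2*quant*|coeff| + (quant odd ? quant : quant-1))
  ; The shl TMP0,31 / psllw trick below selects quant vs quant-1, the
  ; DC term is coeff[0]*dcscalar, and the paddsw/psubsw pairs against
  ; mm5 saturate results into [-2048, 2047].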

  mov TMP0, prm3 ; quant
  mov _EAX, prm2 ; coeff
  pcmpeqw mm0,mm0
  lea TMP1, [mmx_quant]
  movq mm6, [TMP1 + TMP0*8] ; quant
  shl TMP0,31 ; quant & 1 ? 0 : - 1
  movq mm7,mm6
  movq mm5,mm0
  movd mm1,TMP0d
  mov TMP1, prm1 ; data
  psllw mm0,mm1
  paddw mm7,mm7 ; 2*quant
  paddw mm6,mm0 ; quant-1
  psllw mm5,12
  mov TMP0,8
  psrlw mm5,1

.loop:
  movq mm0,[_EAX]
  pxor mm2,mm2
  pxor mm4,mm4
  pcmpgtw mm2,mm0
  pcmpeqw mm4,mm0
  pmullw mm0,mm7 ; * 2 * quant
  movq mm1,[_EAX+8]
  psubw mm0,mm2
  pxor mm2,mm6
  pxor mm3,mm3
  paddsw mm0, mm5
  paddsw mm1, mm5

  movq [TMP1],mm0
  lea _EAX,[_EAX+16]
  movq [TMP1+8],mm1

  dec TMP0
  lea TMP1,[TMP1+16]
  jne .loop

  ; deal with DC
  mov _EAX, prm2 ; coeff
  movd mm1,prm4d ; dcscalar
  movd mm0,[_EAX] ; coeff[0]
  pmullw mm0,mm1 ; * dcscalar
  mov TMP1, prm1 ; data
  paddsw mm0, mm5 ; saturate +
  psubsw mm0, mm5
  psubsw mm0, mm5 ; saturate -
  paddsw mm0, mm5
  movd eax,mm0
  mov [TMP1], ax

  xor _EAX, _EAX ; return 0
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
;-----------------------------------------------------------------------------


ALIGN SECTION_ALIGN
dequant_h263_intra_xmm:
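
  ; Same as dequant_h263_intra_mmx, but clamping to [-2048, 2047]
  ; with pminsw/pmaxsw (mm4 = 2047, mm5 = -2048) instead of the
  ; saturating add/subtract trick.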

  mov TMP0, prm3 ; quant
  mov _EAX, prm2 ; coeff

  movd mm6,TMP0d ; quant
  pcmpeqw mm0,mm0
  pshufw mm6,mm6,0 ; all quant
  shl TMP0,31
  movq mm5,mm0
  movq mm7,mm6
  movd mm1,TMP0d
  mov TMP1, prm1 ; data
  psllw mm0,mm1 ; quant & 1 ? 0 : - 1
  movq mm4,mm5
  paddw mm7,mm7 ; quant*2
  paddw mm6,mm0 ; quant-1
  psrlw mm4,5 ; mm4=2047
  mov TMP0,8
  pxor mm5,mm4 ; mm5=-2048

.loop:
  movq mm0,[_EAX]
  pxor mm2,mm2
  pxor mm3,mm3

  pcmpgtw mm2,mm0
  pcmpeqw mm3,mm0 ; if coeff==0...
  pmullw mm0,mm7 ; * 2 * quant
  movq mm1,[_EAX+8]

  psubw mm0,mm2
  pxor mm2,mm6

  pmaxsw mm0,mm5
  pminsw mm1,mm4
  movq [TMP1],mm0
  pmaxsw mm1,mm5
  lea _EAX,[_EAX+16]
  movq [TMP1+8],mm1

  dec TMP0
  lea TMP1,[TMP1+16]
  jne .loop

  ; deal with DC
  mov _EAX, prm2 ; coeff
  movd mm1,prm4d ; dcscalar
  movd mm0, [_EAX]
  pmullw mm0, mm1
  mov TMP1, prm1 ; data
  pminsw mm0,mm4
  pmaxsw mm0,mm5
  movd eax, mm0
  mov [TMP1], ax

  xor _EAX, _EAX ; return 0
  ret
ENDFUNC


;-----------------------------------------------------------------------------
;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
dequant_h263_intra_sse2:
|
PUSH_XMM6_XMM7 |
777 |
|
|
778 |
|
mov TMP0, prm3 ; quant |
779 |
|
mov _EAX, prm2 ; coeff |
780 |
|
|
781 |
mov ecx, [esp+12] ; quant |
movd xmm6,TMP0d ; quant |
|
mov eax, [esp+ 8] ; coeff |
|
782 |
|
|
783 |
movd xmm6,ecx ; quant |
shl TMP0,31 |
|
; shr ecx,1 |
|
|
shl ecx,31 |
|
784 |
pshuflw xmm6,xmm6,0 |
pshuflw xmm6,xmm6,0 |
785 |
pcmpeqw xmm0,xmm0 |
pcmpeqw xmm0,xmm0 |
786 |
movlhps xmm6,xmm6 ; all quant |
movlhps xmm6,xmm6 ; all quant |
787 |
movd xmm1,ecx |
movd xmm1,TMP0d |
788 |
movdqa xmm5,xmm0 |
movdqa xmm5,xmm0 |
789 |
movdqa xmm7,xmm6 |
movdqa xmm7,xmm6 |
790 |
mov edx, [esp+ 4] ; data |
mov TMP1, prm1 ; data |
791 |
paddw xmm7,xmm7 ; quant *2 |
paddw xmm7,xmm7 ; quant *2 |
792 |
psllw xmm0,xmm1 ; quant & 1 ? 0 : - 1 |
psllw xmm0,xmm1 ; quant & 1 ? 0 : - 1 |
793 |
movdqa xmm4,xmm5 |
movdqa xmm4,xmm5 |
794 |
paddw xmm6,xmm0 ; quant-1 |
paddw xmm6,xmm0 ; quant-1 |
795 |
psrlw xmm4,5 ; 2047 |
psrlw xmm4,5 ; 2047 |
796 |
mov ecx,4 |
mov TMP0,4 |
797 |
pxor xmm5,xmm4 ; mm5=-2048 |
pxor xmm5,xmm4 ; mm5=-2048 |
798 |
|
|
799 |
.loop: |
.loop: |
800 |
movdqa xmm0,[eax] |
movdqa xmm0,[_EAX] |
801 |
pxor xmm2,xmm2 |
pxor xmm2,xmm2 |
802 |
pxor xmm3,xmm3 |
pxor xmm3,xmm3 |
803 |
|
|
804 |
pcmpgtw xmm2,xmm0 |
pcmpgtw xmm2,xmm0 |
805 |
pcmpeqw xmm3,xmm0 |
pcmpeqw xmm3,xmm0 |
806 |
pmullw xmm0,xmm7 ; * 2 * quant |
pmullw xmm0,xmm7 ; * 2 * quant |
807 |
movdqa xmm1,[eax+16] |
movdqa xmm1,[_EAX+16] |
808 |
|
|
809 |
psubw xmm0,xmm2 |
psubw xmm0,xmm2 |
810 |
pxor xmm2,xmm6 |
pxor xmm2,xmm6 |
824 |
|
|
825 |
pmaxsw xmm0,xmm5 |
pmaxsw xmm0,xmm5 |
826 |
pminsw xmm1,xmm4 |
pminsw xmm1,xmm4 |
827 |
movdqa [edx],xmm0 |
movdqa [TMP1],xmm0 |
828 |
pmaxsw xmm1,xmm5 |
pmaxsw xmm1,xmm5 |
829 |
lea eax,[eax+32] |
lea _EAX,[_EAX+32] |
830 |
movdqa [edx+16],xmm1 |
movdqa [TMP1+16],xmm1 |
831 |
|
|
832 |
dec ecx |
dec TMP0 |
833 |
lea edx,[edx+32] |
lea TMP1,[TMP1+32] |
834 |
jne .loop |
jne .loop |
835 |
|
|
836 |
; deal with DC |
; deal with DC |
837 |
|
|
838 |
mov eax, [esp+ 8] ; coeff |
mov _EAX, prm2 ; coeff |
839 |
movsx eax,word [eax] |
movsx _EAX,word [_EAX] |
840 |
imul dword [esp+16] ; dcscalar |
imul prm4d ; dcscalar |
841 |
mov edx, [esp+ 4] ; data |
mov TMP1, prm1 ; data |
842 |
movd xmm0,eax |
movd xmm0,eax |
843 |
pminsw xmm0,xmm4 |
pminsw xmm0,xmm4 |
844 |
pmaxsw xmm0,xmm5 |
pmaxsw xmm0,xmm5 |
845 |
movd eax,xmm0 |
movd eax,xmm0 |
846 |
|
|
847 |
mov [edx], ax |
mov [TMP1], ax |
848 |
|
|
849 |
xor eax, eax ; return 0 |
xor _EAX, _EAX ; return 0 |
850 |
|
|
851 |
|
POP_XMM6_XMM7 |
852 |
ret |
ret |
853 |
|
ENDFUNC |
854 |
|
|
855 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
856 |
; |
; |
861 |
; |
; |
862 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
863 |
|
|
864 |
ALIGN 16 |
ALIGN SECTION_ALIGN |
865 |
dequant_h263_inter_mmx: |
dequant_h263_inter_mmx: |
866 |
|
|
867 |
mov ecx, [esp+12] ; quant |
mov TMP0, prm3 ; quant |
868 |
mov eax, [esp+ 8] ; coeff |
mov _EAX, prm2 ; coeff |
869 |
pcmpeqw mm0,mm0 |
pcmpeqw mm0,mm0 |
870 |
movq mm6, [mmx_quant + ecx*8] ; quant |
lea TMP1, [mmx_quant] |
871 |
shl ecx,31 ; odd/even |
movq mm6, [TMP1 + TMP0*8] ; quant |
872 |
|
shl TMP0,31 ; odd/even |
873 |
movq mm7,mm6 |
movq mm7,mm6 |
874 |
movd mm1,ecx |
movd mm1,TMP0d |
875 |
mov edx, [esp+ 4] ; data |
mov TMP1, prm1 ; data |
876 |
movq mm5,mm0 |
movq mm5,mm0 |
877 |
psllw mm0,mm1 ; quant & 1 ? 0 : - 1 |
psllw mm0,mm1 ; quant & 1 ? 0 : - 1 |
878 |
paddw mm7,mm7 ; quant*2 |
paddw mm7,mm7 ; quant*2 |
879 |
paddw mm6,mm0 ; quant & 1 ? quant : quant - 1 |
paddw mm6,mm0 ; quant & 1 ? quant : quant - 1 |
880 |
psllw mm5,12 |
psllw mm5,12 |
881 |
mov ecx,8 |
mov TMP0,8 |
882 |
psrlw mm5,1 ; 32767-2047 (32768-2048) |
psrlw mm5,1 ; 32767-2047 (32768-2048) |
883 |
|
|
884 |
.loop: |
.loop: |
885 |
movq mm0,[eax] |
movq mm0,[_EAX] |
886 |
pxor mm4,mm4 |
pxor mm4,mm4 |
887 |
pxor mm2,mm2 |
pxor mm2,mm2 |
888 |
pcmpeqw mm4,mm0 ; if coeff==0... |
pcmpeqw mm4,mm0 ; if coeff==0... |
890 |
pmullw mm0,mm7 ; * 2 * quant |
pmullw mm0,mm7 ; * 2 * quant |
891 |
pxor mm3,mm3 |
pxor mm3,mm3 |
892 |
psubw mm0,mm2 |
psubw mm0,mm2 |
893 |
movq mm1,[eax+8] |
movq mm1,[_EAX+8] |
894 |
pxor mm2,mm6 |
pxor mm2,mm6 |
895 |
pcmpgtw mm3,mm1 |
pcmpgtw mm3,mm1 |
896 |
pandn mm4,mm2 ; ... then data==0 |
pandn mm4,mm2 ; ... then data==0 |
912 |
paddsw mm0, mm5 |
paddsw mm0, mm5 |
913 |
paddsw mm1, mm5 |
paddsw mm1, mm5 |
914 |
|
|
915 |
movq [edx],mm0 |
movq [TMP1],mm0 |
916 |
lea eax,[eax+16] |
lea _EAX,[_EAX+16] |
917 |
movq [edx+8],mm1 |
movq [TMP1+8],mm1 |
918 |
|
|
919 |
dec ecx |
dec TMP0 |
920 |
lea edx,[edx+16] |
lea TMP1,[TMP1+16] |
921 |
jne .loop |
jne .loop |
922 |
|
|
923 |
xor eax, eax ; return 0 |
xor _EAX, _EAX ; return 0 |
924 |
ret |
ret |
925 |
|
ENDFUNC |
926 |
|
|
927 |
|
|
928 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
933 |
; const uint16_t *mpeg_matrices); |
; const uint16_t *mpeg_matrices); |
934 |
; |
; |
935 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
936 |
ALIGN 16 |
ALIGN SECTION_ALIGN |
937 |
dequant_h263_inter_xmm: |
dequant_h263_inter_xmm: |
938 |
|
|
939 |
mov ecx, [esp+12] ; quant |
mov TMP0, prm3 ; quant |
940 |
mov eax, [esp+ 8] ; coeff |
mov _EAX, prm2 ; coeff |
941 |
pcmpeqw mm0,mm0 |
pcmpeqw mm0,mm0 |
942 |
movq mm6, [mmx_quant + ecx*8] ; quant |
lea TMP1, [mmx_quant] |
943 |
shl ecx,31 |
movq mm6, [TMP1 + TMP0*8] ; quant |
944 |
|
shl TMP0,31 |
945 |
movq mm5,mm0 |
movq mm5,mm0 |
946 |
movd mm1,ecx |
movd mm1,TMP0d |
947 |
movq mm7,mm6 |
movq mm7,mm6 |
948 |
psllw mm0,mm1 |
psllw mm0,mm1 |
949 |
mov edx, [esp+ 4] ; data |
mov TMP1, prm1 ; data |
950 |
movq mm4,mm5 |
movq mm4,mm5 |
951 |
paddw mm7,mm7 |
paddw mm7,mm7 |
952 |
paddw mm6,mm0 ; quant-1 |
paddw mm6,mm0 ; quant-1 |
953 |
|
|
954 |
psrlw mm4,5 |
psrlw mm4,5 |
955 |
mov ecx,8 |
mov TMP0,8 |
956 |
pxor mm5,mm4 ; mm5=-2048 |
pxor mm5,mm4 ; mm5=-2048 |
957 |
|
|
958 |
.loop: |
.loop: |
959 |
movq mm0,[eax] |
movq mm0,[_EAX] |
960 |
pxor mm3,mm3 |
pxor mm3,mm3 |
961 |
pxor mm2,mm2 |
pxor mm2,mm2 |
962 |
pcmpeqw mm3,mm0 |
pcmpeqw mm3,mm0 |
963 |
pcmpgtw mm2,mm0 |
pcmpgtw mm2,mm0 |
964 |
pmullw mm0,mm7 ; * 2 * quant |
pmullw mm0,mm7 ; * 2 * quant |
965 |
pandn mm3,mm6 |
pandn mm3,mm6 |
966 |
movq mm1,[eax+8] |
movq mm1,[_EAX+8] |
967 |
psubw mm0,mm2 |
psubw mm0,mm2 |
968 |
pxor mm2,mm3 |
pxor mm2,mm3 |
969 |
pxor mm3,mm3 |
pxor mm3,mm3 |
982 |
pmaxsw mm0,mm5 |
pmaxsw mm0,mm5 |
983 |
pmaxsw mm1,mm5 |
pmaxsw mm1,mm5 |
984 |
|
|
985 |
movq [edx],mm0 |
movq [TMP1],mm0 |
986 |
lea eax,[eax+16] |
lea _EAX,[_EAX+16] |
987 |
movq [edx+8],mm1 |
movq [TMP1+8],mm1 |
988 |
|
|
989 |
dec ecx |
dec TMP0 |
990 |
lea edx,[edx+16] |
lea TMP1,[TMP1+16] |
991 |
jne .loop |
jne .loop |
992 |
|
|
993 |
xor eax, eax ; return 0 |
xor _EAX, _EAX ; return 0 |
994 |
ret |
ret |
995 |
|
ENDFUNC |
996 |
|
|
997 |
|
|
998 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
1004 |
; |
; |
1005 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
1006 |
|
|
1007 |
ALIGN 16 |
ALIGN SECTION_ALIGN |
1008 |
dequant_h263_inter_sse2: |
dequant_h263_inter_sse2: |
1009 |
|
PUSH_XMM6_XMM7 |
1010 |
|
|
1011 |
mov ecx, [esp+12] ; quant |
mov TMP0, prm3 ; quant |
1012 |
mov eax, [esp+ 8] ; coeff |
mov _EAX, prm2 ; coeff |
1013 |
|
|
1014 |
movq xmm6, [mmx_quant + ecx*8] ; quant |
lea TMP1, [mmx_quant] |
1015 |
inc ecx |
movq xmm6, [TMP1 + TMP0*8] ; quant |
1016 |
|
inc TMP0 |
1017 |
pcmpeqw xmm5,xmm5 |
pcmpeqw xmm5,xmm5 |
1018 |
and ecx,1 |
and TMP0,1 |
1019 |
movlhps xmm6,xmm6 |
movlhps xmm6,xmm6 |
1020 |
movd xmm0,ecx |
movd xmm0,TMP0d |
1021 |
movdqa xmm7,xmm6 |
movdqa xmm7,xmm6 |
1022 |
pshuflw xmm0,xmm0,0 |
pshuflw xmm0,xmm0,0 |
1023 |
movdqa xmm4,xmm5 |
movdqa xmm4,xmm5 |
1024 |
mov edx, [esp+ 4] ; data |
mov TMP1, prm1 ; data |
1025 |
movlhps xmm0,xmm0 |
movlhps xmm0,xmm0 |
1026 |
paddw xmm7,xmm7 |
paddw xmm7,xmm7 |
1027 |
psubw xmm6,xmm0 |
psubw xmm6,xmm0 |
1028 |
psrlw xmm4,5 ; 2047 |
psrlw xmm4,5 ; 2047 |
1029 |
mov ecx,4 |
mov TMP0,4 |
1030 |
pxor xmm5,xmm4 ; mm5=-2048 |
pxor xmm5,xmm4 ; mm5=-2048 |
1031 |
|
|
1032 |
.loop: |
.loop: |
1033 |
movdqa xmm0,[eax] |
movdqa xmm0,[_EAX] |
1034 |
pxor xmm3,xmm3 |
pxor xmm3,xmm3 |
1035 |
pxor xmm2,xmm2 |
pxor xmm2,xmm2 |
1036 |
pcmpeqw xmm3,xmm0 |
pcmpeqw xmm3,xmm0 |
1037 |
pcmpgtw xmm2,xmm0 |
pcmpgtw xmm2,xmm0 |
1038 |
pmullw xmm0,xmm7 ; * 2 * quant |
pmullw xmm0,xmm7 ; * 2 * quant |
1039 |
pandn xmm3,xmm6 |
pandn xmm3,xmm6 |
1040 |
movdqa xmm1,[eax+16] |
movdqa xmm1,[_EAX+16] |
1041 |
psubw xmm0,xmm2 |
psubw xmm0,xmm2 |
1042 |
pxor xmm2,xmm3 |
pxor xmm2,xmm3 |
1043 |
pxor xmm3,xmm3 |
pxor xmm3,xmm3 |
1056 |
pmaxsw xmm0,xmm5 |
pmaxsw xmm0,xmm5 |
1057 |
pmaxsw xmm1,xmm5 |
pmaxsw xmm1,xmm5 |
1058 |
|
|
1059 |
movdqa [edx],xmm0 |
movdqa [TMP1],xmm0 |
1060 |
lea eax,[eax+32] |
lea _EAX,[_EAX+32] |
1061 |
movdqa [edx+16],xmm1 |
movdqa [TMP1+16],xmm1 |
1062 |
|
|
1063 |
dec ecx |
dec TMP0 |
1064 |
lea edx,[edx+32] |
lea TMP1,[TMP1+32] |
1065 |
jne .loop |
jne .loop |
1066 |
|
|
1067 |
xor eax, eax ; return 0 |
xor _EAX, _EAX ; return 0 |
1068 |
|
|
1069 |
|
POP_XMM6_XMM7 |
1070 |
ret |
ret |
1071 |
|
ENDFUNC |
1072 |
|
|
1073 |
|
|
1074 |
|
%ifidn __OUTPUT_FORMAT__,elf |
1075 |
|
section ".note.GNU-stack" noalloc noexec nowrite progbits |
1076 |
|
%endif |
1077 |
|
|