;-----------------------------------------------------------------------------
; cglobal <name>
;
; Declares an exported symbol in a platform-portable way:
;   - PREFIX defined      -> export with a leading underscore (_name) and
;                            %define the bare name to the prefixed one so the
;                            rest of the file can keep using it unchanged.
;   - MARK_FUNCS defined  -> additionally tag the symbol as an ELF function
;                            with an explicit size (name.endfunc - name), so
;                            each routine must end with an `.endfunc` label.
;-----------------------------------------------------------------------------
%macro cglobal 1
	%ifdef PREFIX
		%ifdef MARK_FUNCS
			global _%1:function %1.endfunc-%1
			%define %1 _%1:function %1.endfunc-%1
		%else
			global _%1
			%define %1 _%1
		%endif
	%else
		%ifdef MARK_FUNCS
			global %1:function %1.endfunc-%1
		%else
			global %1
		%endif
	%endif
%endmacro
;=============================================================================
; Exported half-pel interpolation entry points (see cglobal macro above).
; NOTE(review): declarations for symbols defined earlier in the file (e.g.
; interpolate8x8_halfpel_h_3dne) appear to precede this chunk — confirm.
;=============================================================================

cglobal interpolate8x8_halfpel_v_3dne
cglobal interpolate8x8_halfpel_hv_3dne
cglobal interpolate8x4_halfpel_h_3dne
cglobal interpolate8x4_halfpel_v_3dne
cglobal interpolate8x4_halfpel_hv_3dne
86 |
|
|
87 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
88 |
; |
; |
89 |
; void interpolate8x8_halfpel_h_3dne(uint8_t * const dst, |
; void interpolate8x8_halfpel_h_3dne(uint8_t * const dst, |
158 |
lea ecx,[ecx+2*edx] |
lea ecx,[ecx+2*edx] |
159 |
COPY_H_SSE_RND1 |
COPY_H_SSE_RND1 |
160 |
ret |
ret |
161 |
|
.endfunc |
162 |
|
|
163 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
164 |
; |
; |
282 |
movq [ecx], mm4 |
movq [ecx], mm4 |
283 |
movq [ecx+edx], mm5 |
movq [ecx+edx], mm5 |
284 |
ret |
ret |
285 |
|
.endfunc |
286 |
|
|
287 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
288 |
; |
; |
414 |
lea ecx,[ecx+2*edx] |
lea ecx,[ecx+2*edx] |
415 |
COPY_HV_SSE_RND1 |
COPY_HV_SSE_RND1 |
416 |
ret |
ret |
417 |
|
.endfunc |
;-----------------------------------------------------------------------------
;
; void interpolate8x4_halfpel_h_3dne(uint8_t * const dst,
;                                    const uint8_t * const src,
;                                    const uint32_t stride,
;                                    const uint32_t rounding);
;
; Horizontal half-pel interpolation of an 8x4 block.
; cdecl args on the stack; clobbers eax, ecx, edx, MMX regs, flags.
; Processing is delegated to the COPY_H_SSE_RND0/RND1 macros (two lines
; per invocation), selected by the rounding argument.
;
;-----------------------------------------------------------------------------

ALIGN 16
interpolate8x4_halfpel_h_3dne:

  mov eax, [esp+ 8]     ; Src
  mov edx, [esp+12]     ; stride
  dec dword [esp+16]    ; rounding (sets ZF when rounding==1)

  jz .rounding1
  mov ecx, [esp+ 4]     ; Dst

  COPY_H_SSE_RND0 0
  lea ecx, [ecx+2*edx]
  COPY_H_SSE_RND0 1
  ret

.rounding1
  ; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
  mov ecx, [esp+ 4]     ; Dst
  movq mm7, [mmx_one]
  COPY_H_SSE_RND1
  lea ecx, [ecx+2*edx]
  COPY_H_SSE_RND1
  ret
.endfunc
;-----------------------------------------------------------------------------
;
; void interpolate8x4_halfpel_v_3dne(uint8_t * const dst,
;                                    const uint8_t * const src,
;                                    const uint32_t stride,
;                                    const uint32_t rounding);
;
; Vertical half-pel interpolation of an 8x4 block.
; cdecl args on the stack; clobbers eax, ecx, edx, MMX regs, flags
; (esi is saved/restored around the rounding path).
; Instruction order is pipelining-sensitive — do not reorder.
;
;-----------------------------------------------------------------------------

ALIGN 16
interpolate8x4_halfpel_v_3dne:

  mov eax, [esp+ 8]     ; Src
  mov edx, [esp+12]     ; stride
  dec dword [esp+16]    ; rounding (sets ZF when rounding==1)

  ; we process 2 line at a time

  jz .rounding1
  pxor mm2, mm2
  movq mm0, [eax]
  movq mm1, [eax+edx]
  por mm2, [eax+2*edx]  ; Something like preload (pipelining)
  mov ecx, [esp+ 4]     ; Dst
  lea eax, [eax+2*edx]
  pxor mm4, mm4
  pavgb mm0, mm1        ; avg(line0, line1), rounded up
  pavgb mm1, mm2
  movq [byte ecx], mm0
  movq [ecx+edx], mm1

  pxor mm6, mm6
  add eax, edx
  lea ecx, [ecx+2*edx]
  movq mm3, [byte eax]
  por mm4, [eax+edx]
  lea eax, [eax+2*edx]
  pavgb mm2, mm3
  pavgb mm3, mm4
  movq [ecx], mm2
  movq [ecx+edx], mm3

  ret

ALIGN 8
.rounding1
  ; rounding==1 path: compute (i+j)/2 via complements, since pavgb
  ; always rounds up: avg_down(i,j) = ~avg_up(~i,~j)
  pcmpeqb mm0, mm0
  psubusb mm0, [eax]            ; eax==line0
  add eax, edx                  ; eax==line1
  mov ecx, [esp+ 4]             ; Dst

  push esi                      ; callee-saved, used as constant pointer

  pcmpeqb mm1, mm1
  pcmpeqb mm2, mm2
  mov esi, mm_minusone
  psubusb mm1, [byte eax]       ; line1
  psubusb mm2, [eax+edx]        ; line2
  lea eax, [eax+2*edx]          ; eax==line3
  movq mm6, [esi]
  movq mm7, [esi]
  pavgb mm0, mm1
  pavgb mm1, mm2
  psubusb mm6, mm0
  psubusb mm7, mm1
  movq [ecx], mm6               ; store line0
  movq [ecx+edx], mm7           ; store line1

  lea ecx, [ecx+2*edx]
  pcmpeqb mm3, mm3
  pcmpeqb mm4, mm4
  psubusb mm3, [eax]            ; line3
  psubusb mm4, [eax+edx]        ; line4
  lea eax, [eax+2*edx]          ; eax==line 5
  pavgb mm2, mm3
  pavgb mm3, mm4
  movq mm0, [esi]
  movq mm1, [esi]
  psubusb mm0, mm2
  psubusb mm1, mm3
  movq [ecx], mm0               ; store line2
  movq [ecx+edx], mm1           ; store line3

  pop esi

  ret

.endfunc
;-----------------------------------------------------------------------------
;
; void interpolate8x4_halfpel_hv_3dne(uint8_t * const dst,
;                                     const uint8_t * const src,
;                                     const uint32_t stride,
;                                     const uint32_t rounding);
;
; Combined horizontal+vertical half-pel interpolation of an 8x4 block.
; cdecl args on the stack; clobbers eax, ecx, edx, MMX regs, flags.
; Seeds the loop invariants (mm2, mm3) from the first source row, then
; delegates to the COPY_HV_SSE_RND0/RND1 macros (two lines each),
; selected by the rounding argument.
;
;-----------------------------------------------------------------------------

ALIGN 16
interpolate8x4_halfpel_hv_3dne:
  mov eax, [esp+ 8]     ; Src
  mov edx, [esp+12]     ; stride
  dec dword [esp+16]    ; rounding (sets ZF when rounding==1)

  ; loop invariants: mm2=(i+j+1)/2 and mm3= i^j
  movq mm2, [eax]
  movq mm3, [eax+1]
  movq mm6, mm2
  pavgb mm2, mm3
  pxor mm3, mm6         ; mm2/mm3 ready
  mov ecx, [esp+ 4]     ; Dst
  movq mm7, [mmx_one]

  jz near .rounding1
  lea ebp, [byte ebp]   ; multi-byte nop: pads for alignment, no effect
  COPY_HV_SSE_RND0
  lea ecx, [ecx+2*edx]
  COPY_HV_SSE_RND0
  ret

ALIGN 16
.rounding1
  COPY_HV_SSE_RND1
  lea ecx, [ecx+2*edx]
  COPY_HV_SSE_RND1
  ret
.endfunc