19 |
* along with this program ; if not, write to the Free Software |
* along with this program ; if not, write to the Free Software |
20 |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
21 |
* |
* |
22 |
* $Id: gmc.c,v 1.4 2006-06-07 21:00:55 Skal Exp $ |
* $Id: gmc.c,v 1.7 2006-11-07 19:59:03 Skal Exp $ |
23 |
* |
* |
24 |
****************************************************************************/ |
****************************************************************************/ |
25 |
|
|
27 |
#include "../global.h" |
#include "../global.h" |
28 |
#include "../encoder.h" |
#include "../encoder.h" |
29 |
#include "gmc.h" |
#include "gmc.h" |
30 |
|
#include "../utils/emms.h" |
31 |
|
|
32 |
#include <stdio.h> |
#include <stdio.h> |
33 |
|
|
34 |
|
/* initialized by init_GMC(), for 3points */ |
35 |
|
static |
36 |
|
void (*Predict_16x16_func)(const NEW_GMC_DATA * const This, |
37 |
|
uint8_t *dst, const uint8_t *src, |
38 |
|
int dststride, int srcstride, int x, int y, int rounding) = 0; |
39 |
|
static |
40 |
|
void (*Predict_8x8_func)(const NEW_GMC_DATA * const This, |
41 |
|
uint8_t *uDst, const uint8_t *uSrc, |
42 |
|
uint8_t *vDst, const uint8_t *vSrc, |
43 |
|
int dststride, int srcstride, int x, int y, int rounding) = 0; |
44 |
|
|
45 |
|
/****************************************************************************/ |
46 |
|
/* this is borrowed from bitstream.c until we find a common solution */ |
47 |
|
/* Bit length of 'value' — i.e. floor(log2(value)) + 1, with 0 mapping to 0.
 * (Borrowed from bitstream.c until we find a common solution.) */
static uint32_t __inline
log2bin(uint32_t value)
{
/* Changed by Chenm001 */
#if !defined(_MSC_VER)
	int bits;

	for (bits = 0; value != 0; value >>= 1)
		bits++;
	return bits;
#else
	/* NOTE(review): BSR leaves EAX undefined for value==0; result is
	 * returned implicitly through EAX (MSVC inline-asm convention). */
	__asm {
		bsr eax, value
		inc eax
	}
#endif
}
66 |
|
|
67 |
|
/* 16*sizeof(int) -> 1 or 2 cachelines */ |
68 |
|
/* table lookup might be faster! (still to be benchmarked) */ |
69 |
|
|
70 |
|
/* |
71 |
|
static int log2bin_table[16] = |
72 |
|
{ 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4}; |
73 |
|
*/ |
74 |
|
/* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 */ |
75 |
|
|
76 |
|
#define RDIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) |
77 |
|
#define RSHIFT(a,b) ( (a)>0 ? ((a) + (1<<((b)-1)))>>(b) : ((a) + (1<<((b)-1))-1)>>(b)) |
78 |
|
|
79 |
|
#define MLT(i) (((16-(i))<<16) + (i)) |
80 |
|
static const uint32_t MTab[16] = { |
81 |
|
MLT( 0), MLT( 1), MLT( 2), MLT( 3), MLT( 4), MLT( 5), MLT( 6), MLT( 7), |
82 |
|
MLT( 8), MLT( 9), MLT(10), MLT(11), MLT(12), MLT(13), MLT(14), MLT(15) |
83 |
|
}; |
84 |
|
#undef MLT |
85 |
|
|
86 |
/* ************************************************************ |
/* ************************************************************ |
87 |
* Pts = 2 or 3 |
* Pts = 2 or 3 |
88 |
* |
* |
91 |
* Conversely, *dst is the macroblock top-left address. |
* Conversely, *dst is the macroblock top-left address. |
92 |
*/ |
*/ |
93 |
|
|
94 |
|
static |
95 |
void Predict_16x16_C(const NEW_GMC_DATA * const This, |
void Predict_16x16_C(const NEW_GMC_DATA * const This, |
96 |
uint8_t *dst, const uint8_t *src, |
uint8_t *dst, const uint8_t *src, |
97 |
int dststride, int srcstride, int x, int y, int rounding) |
int dststride, int srcstride, int x, int y, int rounding) |
151 |
} |
} |
152 |
} |
} |
153 |
|
|
154 |
|
static |
155 |
void Predict_8x8_C(const NEW_GMC_DATA * const This, |
void Predict_8x8_C(const NEW_GMC_DATA * const This, |
156 |
uint8_t *uDst, const uint8_t *uSrc, |
uint8_t *uDst, const uint8_t *uSrc, |
157 |
uint8_t *vDst, const uint8_t *vSrc, |
uint8_t *vDst, const uint8_t *vSrc, |
231 |
} |
} |
232 |
} |
} |
233 |
|
|
234 |
|
static |
235 |
void get_average_mv_C(const NEW_GMC_DATA * const Dsp, VECTOR * const mv, |
void get_average_mv_C(const NEW_GMC_DATA * const Dsp, VECTOR * const mv, |
236 |
int x, int y, int qpel) |
int x, int y, int qpel) |
237 |
{ |
{ |
262 |
* simplified version for 1 warp point |
* simplified version for 1 warp point |
263 |
*/ |
*/ |
264 |
|
|
265 |
|
static |
266 |
void Predict_1pt_16x16_C(const NEW_GMC_DATA * const This, |
void Predict_1pt_16x16_C(const NEW_GMC_DATA * const This, |
267 |
uint8_t *Dst, const uint8_t *Src, |
uint8_t *Dst, const uint8_t *Src, |
268 |
int dststride, int srcstride, int x, int y, int rounding) |
int dststride, int srcstride, int x, int y, int rounding) |
314 |
} |
} |
315 |
} |
} |
316 |
|
|
317 |
|
static |
318 |
void Predict_1pt_8x8_C(const NEW_GMC_DATA * const This, |
void Predict_1pt_8x8_C(const NEW_GMC_DATA * const This, |
319 |
uint8_t *uDst, const uint8_t *uSrc, |
uint8_t *uDst, const uint8_t *uSrc, |
320 |
uint8_t *vDst, const uint8_t *vSrc, |
uint8_t *vDst, const uint8_t *vSrc, |
377 |
} |
} |
378 |
} |
} |
379 |
|
|
380 |
|
static |
381 |
void get_average_mv_1pt_C(const NEW_GMC_DATA * const Dsp, VECTOR * const mv, |
void get_average_mv_1pt_C(const NEW_GMC_DATA * const Dsp, VECTOR * const mv, |
382 |
int x, int y, int qpel) |
int x, int y, int qpel) |
383 |
{ |
{ |
385 |
mv->y = RSHIFT(Dsp->Vo<<qpel, 3); |
mv->y = RSHIFT(Dsp->Vo<<qpel, 3); |
386 |
} |
} |
387 |
|
|
388 |
|
#if defined(ARCH_IS_IA32) |
389 |
|
/* ************************************************************* |
390 |
|
* MMX core function |
391 |
|
*/ |
392 |
|
|
393 |
|
static |
394 |
|
void (*GMC_Core_Lin_8)(uint8_t *Dst, const uint16_t * Offsets, |
395 |
|
const uint8_t * const Src0, const int BpS, const int Rounder) = 0; |
396 |
|
|
397 |
|
extern void xvid_GMC_Core_Lin_8_mmx(uint8_t *Dst, const uint16_t * Offsets, |
398 |
|
const uint8_t * const Src0, const int BpS, const int Rounder); |
399 |
|
|
400 |
|
extern void xvid_GMC_Core_Lin_8_sse2(uint8_t *Dst, const uint16_t * Offsets, |
401 |
|
const uint8_t * const Src0, const int BpS, const int Rounder); |
402 |
|
|
403 |
|
/* *************************************************************/ |
404 |
|
|
405 |
|
static void GMC_Core_Non_Lin_8(uint8_t *Dst, |
406 |
|
const uint16_t * Offsets, |
407 |
|
const uint8_t * const Src0, const int srcstride, |
408 |
|
const int Rounder) |
409 |
|
{ |
410 |
|
int i; |
411 |
|
for(i=0; i<8; ++i) |
412 |
|
{ |
413 |
|
uint32_t u = Offsets[i ]; |
414 |
|
uint32_t v = Offsets[i+16]; |
415 |
|
const uint32_t ri = MTab[u&0x0f]; |
416 |
|
const uint32_t rj = MTab[v&0x0f]; |
417 |
|
uint32_t f0, f1; |
418 |
|
const uint8_t * const Src = Src0 + (u>>4) + (v>>4)*srcstride; |
419 |
|
f0 = Src[0]; |
420 |
|
f0 |= Src[1] << 16; |
421 |
|
f1 = Src[srcstride +0]; |
422 |
|
f1 |= Src[srcstride +1] << 16; |
423 |
|
f0 = (ri*f0)>>16; |
424 |
|
f1 = (ri*f1) & 0x0fff0000; |
425 |
|
f0 |= f1; |
426 |
|
f0 = ( rj*f0 + Rounder ) >> 24; |
427 |
|
Dst[i] = (uint8_t)f0; |
428 |
|
} |
429 |
|
} |
430 |
|
|
431 |
|
////////////////////////////////////////////////////////// |
432 |
|
|
433 |
|
static |
434 |
|
void Predict_16x16_mmx(const NEW_GMC_DATA * const This, |
435 |
|
uint8_t *dst, const uint8_t *src, |
436 |
|
int dststride, int srcstride, int x, int y, int rounding) |
437 |
|
{ |
438 |
|
const int W = This->sW; |
439 |
|
const int H = This->sH; |
440 |
|
const int rho = 3 - This->accuracy; |
441 |
|
const int Rounder = ( 128 - (rounding<<(2*rho)) ) << 16; |
442 |
|
const uint32_t W2 = W<<(16-rho); |
443 |
|
const uint32_t H2 = H<<(16-rho); |
444 |
|
|
445 |
|
const int dUx = This->dU[0]; |
446 |
|
const int dVx = This->dV[0]; |
447 |
|
const int dUy = This->dU[1]; |
448 |
|
const int dVy = This->dV[1]; |
449 |
|
|
450 |
|
int Uo = This->Uo + 16*(dUy*y + dUx*x); |
451 |
|
int Vo = This->Vo + 16*(dVy*y + dVx*x); |
452 |
|
|
453 |
|
int i, j; |
454 |
|
|
455 |
|
DECLARE_ALIGNED_MATRIX(Offsets, 2,16, uint16_t, CACHE_LINE); |
456 |
|
for(j=16; j>0; --j) |
457 |
|
{ |
458 |
|
int32_t U = Uo, V = Vo; |
459 |
|
Uo += dUy; Vo += dVy; |
460 |
|
if ( W2>(uint32_t)U && W2>(uint32_t)(U+15*dUx) && |
461 |
|
H2>(uint32_t)V && H2>(uint32_t)(V+15*dVx) ) |
462 |
|
{ |
463 |
|
uint32_t UV1, UV2; |
464 |
|
for(i=0; i<16; ++i) |
465 |
|
{ |
466 |
|
uint32_t u = ( U >> 16 ) << rho; |
467 |
|
uint32_t v = ( V >> 16 ) << rho; |
468 |
|
U += dUx; V += dVx; |
469 |
|
Offsets[ i] = u; |
470 |
|
Offsets[16+i] = v; |
471 |
|
} |
472 |
|
// batch 8 input pixels when linearity says it's ok |
473 |
|
|
474 |
|
UV1 = (Offsets[0] | (Offsets[16]<<16)) & 0xfff0fff0U; |
475 |
|
UV2 = (Offsets[7] | (Offsets[23]<<16)) & 0xfff0fff0U; |
476 |
|
if (UV1+7*16==UV2) |
477 |
|
GMC_Core_Lin_8(dst, Offsets, src + (Offsets[0]>>4) + (Offsets[16]>>4)*srcstride, srcstride, Rounder); |
478 |
|
else |
479 |
|
GMC_Core_Non_Lin_8(dst, Offsets, src, srcstride, Rounder); |
480 |
|
UV1 = (Offsets[ 8] | (Offsets[24]<<16)) & 0xfff0fff0U; |
481 |
|
UV2 = (Offsets[15] | (Offsets[31]<<16)) & 0xfff0fff0U; |
482 |
|
if (UV1+7*16==UV2) |
483 |
|
GMC_Core_Lin_8(dst+8, Offsets+8, src + (Offsets[8]>>4) + (Offsets[24]>>4)*srcstride, srcstride, Rounder); |
484 |
|
else |
485 |
|
GMC_Core_Non_Lin_8(dst+8, Offsets+8, src, srcstride, Rounder); |
486 |
|
} |
487 |
|
else |
488 |
|
{ |
489 |
|
for(i=0; i<16; ++i) |
490 |
|
{ |
491 |
|
int u = ( U >> 16 ) << rho; |
492 |
|
int v = ( V >> 16 ) << rho; |
493 |
|
U += dUx; V += dVx; |
494 |
|
|
495 |
|
Offsets[ i] = (u<0) ? 0 : (u>=W) ? W : u; |
496 |
|
Offsets[16+i] = (v<0) ? 0 : (v>=H) ? H : v; |
497 |
|
} |
498 |
|
// due to boundary clipping, we cannot infer the 8-pixels batchability |
499 |
|
// simply by using the linearity. Oh well, not a big deal... |
500 |
|
GMC_Core_Non_Lin_8(dst, Offsets, src, srcstride, Rounder); |
501 |
|
GMC_Core_Non_Lin_8(dst+8, Offsets+8, src, srcstride, Rounder); |
502 |
|
} |
503 |
|
dst += dststride; |
504 |
|
} |
505 |
|
} |
506 |
|
|
507 |
|
static |
508 |
|
void Predict_8x8_mmx(const NEW_GMC_DATA * const This, |
509 |
|
uint8_t *uDst, const uint8_t *uSrc, |
510 |
|
uint8_t *vDst, const uint8_t *vSrc, |
511 |
|
int dststride, int srcstride, int x, int y, int rounding) |
512 |
|
{ |
513 |
|
const int W = This->sW >> 1; |
514 |
|
const int H = This->sH >> 1; |
515 |
|
const int rho = 3-This->accuracy; |
516 |
|
const int32_t Rounder = ( 128 - (rounding<<(2*rho)) ) << 16; |
517 |
|
const uint32_t W2 = W<<(16-rho); |
518 |
|
const uint32_t H2 = H<<(16-rho); |
519 |
|
|
520 |
|
const int dUx = This->dU[0]; |
521 |
|
const int dVx = This->dV[0]; |
522 |
|
const int dUy = This->dU[1]; |
523 |
|
const int dVy = This->dV[1]; |
524 |
|
|
525 |
|
int Uo = This->Uco + 8*(dUy*y + dUx*x); |
526 |
|
int Vo = This->Vco + 8*(dVy*y + dVx*x); |
527 |
|
|
528 |
|
DECLARE_ALIGNED_MATRIX(Offsets, 2,16, uint16_t, CACHE_LINE); |
529 |
|
int i, j; |
530 |
|
for(j=8; j>0; --j) |
531 |
|
{ |
532 |
|
int32_t U = Uo, V = Vo; |
533 |
|
Uo += dUy; Vo += dVy; |
534 |
|
if ( W2>(uint32_t)U && W2>(uint32_t)(U+15*dUx) && |
535 |
|
H2>(uint32_t)V && H2>(uint32_t)(V+15*dVx) ) |
536 |
|
{ |
537 |
|
uint32_t UV1, UV2; |
538 |
|
for(i=0; i<8; ++i) |
539 |
|
{ |
540 |
|
int32_t u = ( U >> 16 ) << rho; |
541 |
|
int32_t v = ( V >> 16 ) << rho; |
542 |
|
U += dUx; V += dVx; |
543 |
|
Offsets[ i] = u; |
544 |
|
Offsets[16+i] = v; |
545 |
|
} |
546 |
|
|
547 |
|
// batch 8 input pixels when linearity says it's ok |
548 |
|
UV1 = (Offsets[ 0] | (Offsets[16]<<16)) & 0xfff0fff0U; |
549 |
|
UV2 = (Offsets[ 7] | (Offsets[23]<<16)) & 0xfff0fff0U; |
550 |
|
if (UV1+7*16==UV2) |
551 |
|
{ |
552 |
|
const uint32_t Off = (Offsets[0]>>4) + (Offsets[16]>>4)*srcstride; |
553 |
|
GMC_Core_Lin_8(uDst, Offsets, uSrc+Off, srcstride, Rounder); |
554 |
|
GMC_Core_Lin_8(vDst, Offsets, vSrc+Off, srcstride, Rounder); |
555 |
|
} |
556 |
|
else { |
557 |
|
GMC_Core_Non_Lin_8(uDst, Offsets, uSrc, srcstride, Rounder); |
558 |
|
GMC_Core_Non_Lin_8(vDst, Offsets, vSrc, srcstride, Rounder); |
559 |
|
} |
560 |
|
} |
561 |
|
else |
562 |
|
{ |
563 |
|
for(i=0; i<8; ++i) |
564 |
|
{ |
565 |
|
int u = ( U >> 16 ) << rho; |
566 |
|
int v = ( V >> 16 ) << rho; |
567 |
|
U += dUx; V += dVx; |
568 |
|
Offsets[ i] = (u<0) ? 0 : (u>=W) ? W : u; |
569 |
|
Offsets[16+i] = (v<0) ? 0 : (v>=H) ? H : v; |
570 |
|
} |
571 |
|
GMC_Core_Non_Lin_8(uDst, Offsets, uSrc, srcstride, Rounder); |
572 |
|
GMC_Core_Non_Lin_8(vDst, Offsets, vSrc, srcstride, Rounder); |
573 |
|
} |
574 |
|
uDst += dststride; |
575 |
|
vDst += dststride; |
576 |
|
} |
577 |
|
} |
578 |
|
|
579 |
|
#endif /* ARCH_IS_IA32 */ |
580 |
|
|
581 |
|
/* ************************************************************* |
582 |
|
* will initialize internal pointers |
583 |
|
*/ |
584 |
|
|
585 |
|
void init_GMC(const unsigned int cpu_flags) |
586 |
|
{ |
587 |
|
Predict_16x16_func = Predict_16x16_C; |
588 |
|
Predict_8x8_func = Predict_8x8_C; |
589 |
|
|
590 |
|
#if defined(ARCH_IS_IA32) |
591 |
|
if ((cpu_flags & XVID_CPU_MMX) || (cpu_flags & XVID_CPU_MMXEXT) || |
592 |
|
(cpu_flags & XVID_CPU_3DNOW) || (cpu_flags & XVID_CPU_3DNOWEXT) || |
593 |
|
(cpu_flags & XVID_CPU_SSE) || (cpu_flags & XVID_CPU_SSE2)) |
594 |
|
{ |
595 |
|
Predict_16x16_func = Predict_16x16_mmx; |
596 |
|
Predict_8x8_func = Predict_8x8_mmx; |
597 |
|
if (cpu_flags & XVID_CPU_SSE2) |
598 |
|
GMC_Core_Lin_8 = xvid_GMC_Core_Lin_8_sse2; |
599 |
|
else |
600 |
|
GMC_Core_Lin_8 = xvid_GMC_Core_Lin_8_mmx; |
601 |
|
} |
602 |
|
#endif |
603 |
|
} |
604 |
|
|
605 |
/* ************************************************************* |
/* ************************************************************* |
606 |
* Warning! It's Accuracy being passed, not 'resolution'! |
* Warning! It's Accuracy being passed, not 'resolution'! |
607 |
*/ |
*/ |
689 |
gmc->Uco = (gmc->Uco + gmc->dU[0] + gmc->dU[1])>>2; |
gmc->Uco = (gmc->Uco + gmc->dU[0] + gmc->dU[1])>>2; |
690 |
gmc->Vco = (gmc->Vco + gmc->dV[0] + gmc->dV[1])>>2; |
gmc->Vco = (gmc->Vco + gmc->dV[0] + gmc->dV[1])>>2; |
691 |
|
|
692 |
gmc->predict_16x16 = Predict_16x16_C; |
gmc->predict_16x16 = Predict_16x16_func; |
693 |
gmc->predict_8x8 = Predict_8x8_C; |
gmc->predict_8x8 = Predict_8x8_func; |
694 |
gmc->get_average_mv = get_average_mv_C; |
gmc->get_average_mv = get_average_mv_C; |
695 |
} |
} |
696 |
} |
} |
738 |
|
|
739 |
pMBs[mbnum].mcsel = 0; /* until mode decision */ |
pMBs[mbnum].mcsel = 0; /* until mode decision */ |
740 |
} |
} |
741 |
|
emms(); |
742 |
} |
} |