--- branches/dev-api-3/xvidcore/src/motion/motion_comp.c	2003/01/03 12:43:38	756
+++ branches/dev-api-3/xvidcore/src/motion/motion_comp.c	2003/01/11 14:59:24	769
@@ -3,6 +3,8 @@
 // 01.05.2002 updated MBMotionCompensationBVOP
 // 14.04.2002 bframe compensation
 
+#include <stdio.h>
+
 #include "../encoder.h"
 #include "../utils/mbfunctions.h"
 #include "../image/interpolate8x8.h"
@@ -10,6 +12,51 @@
 #include "../utils/timer.h"
 #include "motion.h"
 
+#ifndef ABS
+#define ABS(X) (((X)>0)?(X):-(X))
+#endif
+#ifndef SIGN
+#define SIGN(X) (((X)>0)?1:-1)
+#endif
+
+
+/* This is borrowed from decoder.c */
+static __inline int gmc_sanitize(int value, int quarterpel, int fcode)
+{
+	int length = 1 << (fcode+4);
+
+	if (quarterpel) value *= 2;
+
+	if (value < -length)
+		return -length;
+	else if (value >= length)
+		return length-1;
+	else return value;
+}
+
+/* And this is borrowed from bitstream.c until we find a common solution */
+
+static uint32_t __inline
+log2bin(uint32_t value)
+{
+/* Changed by Chenm001 */
+#if !defined(_MSC_VER)
+	int n = 0;
+
+	while (value) {
+		value >>= 1;
+		n++;
+	}
+	return n;
+#else
+	__asm {
+		bsr eax, value
+		inc eax
+	}
+#endif
+}
+
+
 static __inline void
 compensate16x16_interpolate(int16_t * const dct_codes,
 							uint8_t * const cur,
@@ -22,17 +69,17 @@
 							uint32_t y,
 							const int32_t dx,
 							const int32_t dy,
-							const uint32_t stride,
+							const int32_t stride,
 							const int quarterpel,
 							const int reduced_resolution,
-							const uint32_t rounding)
+							const int32_t rounding)
 {
 	const uint8_t * ptr;
 
 	if (!reduced_resolution) {
 		if(quarterpel) {
-			if (dx&3 | dy&3) {
+			if ((dx&3) | (dy&3)) {
 				interpolate16x16_quarterpel(tmp - y * stride - x, (uint8_t *) ref,
 											tmp + 32, tmp + 64, tmp + 96,
 											x, y, dx, dy, stride, rounding);
@@ -84,17 +131,17 @@
 							uint32_t y,
 							const int32_t dx,
 							const int32_t dy,
-							const uint32_t stride,
-							const uint32_t quarterpel,
+							const int32_t stride,
+							const int32_t quarterpel,
 							const int reduced_resolution,
-							const uint32_t rounding)
+							const int32_t rounding)
 {
 	const uint8_t * ptr;
 
 	if (!reduced_resolution) {
 		if(quarterpel) {
-			if (dx&3 | dy&3) {
+			if ((dx&3) | (dy&3)) {
 				interpolate8x8_quarterpel(tmp - y*stride - x, (uint8_t *) ref,
 											tmp + 32, tmp + 64, tmp + 96,
 											x, y, dx, dy, stride, rounding);
@@ -125,8 +172,8 @@
 						const uint32_t y,
 						const int32_t dx,
 						const int dy,
-						const uint32_t stride,
-						const uint32_t rounding)
+						const int32_t stride,
+						const int32_t rounding)
 {
 	interpolate8x8_switch(cur, refn, x-1, y-1, dx, dy, stride, rounding);
 	interpolate8x8_switch(cur, refn, x+7, y-1, dx, dy, stride, rounding);
@@ -148,7 +195,7 @@
 					const IMAGE * const Ref,
 					uint8_t * const temp,
 					int16_t * const coeff,
-					const uint32_t stride,
+					const int32_t stride,
 					const int rounding,
 					const int rrv)
 {	/* uv-block-based compensation */
@@ -158,7 +205,6 @@
 					interpolate8x8_switch2(temp, Ref->u, 8 * i, 8 * j, dx, dy,
 											stride, rounding),
 					stride);
-
 	transfer_8to16sub(coeff + 64, Cur->v + 8 * j * stride + 8 * i,
 					interpolate8x8_switch2(temp, Ref->v, 8 * i, 8 * j, dx, dy,
 											stride, rounding),
@@ -183,42 +229,91 @@
 
 void
 MBMotionCompensation(MACROBLOCK * const mb,
-					const uint32_t i,
-					const uint32_t j,
-					const IMAGE * const ref,
-					const IMAGE * const refh,
-					const IMAGE * const refv,
-					const IMAGE * const refhv,
-					IMAGE * const cur,
-					int16_t * dct_codes,
-					const uint32_t width,
-					const uint32_t height,
-					const uint32_t edged_width,
-					const int quarterpel,
-					const int reduced_resolution,
-					const uint32_t rounding)
+					const uint32_t i,
+					const uint32_t j,
+					const IMAGE * const ref,
+					const IMAGE * const refh,
+					const IMAGE * const refv,
+					const IMAGE * const refhv,
+					const IMAGE * const refGMC,
+					IMAGE * const cur,
+					int16_t * dct_codes,
+					const uint32_t width,
+					const uint32_t height,
+					const uint32_t edged_width,
+					const int32_t quarterpel,
+					const int reduced_resolution,
+					const int32_t rounding)
 {
-	int32_t dx = (quarterpel ? mb->qmvs[0].x : mb->mvs[0].x);
-	int32_t dy = (quarterpel ? mb->qmvs[0].y : mb->mvs[0].y);
+	int32_t dx;
+	int32_t dy;
+
+	uint8_t * const tmp = refv->u;
 
-	if ( mb->mode == MODE_NOT_CODED && dx==0 && dy==0 && !reduced_resolution) {	/* quick copy */
-		transfer16x16_copy(cur->y + 16 * (i + j * edged_width),
+	if ( (!reduced_resolution) && (mb->mode == MODE_NOT_CODED) ) {	/* quick copy for early SKIP */
+/* early SKIP is only activated in P-VOPs, not in S-VOPs, so mcsel can never be 1 */
+
+/*		if (mb->mcsel) {
+			transfer16x16_copy(cur->y + 16 * (i + j * edged_width),
+								refGMC->y + 16 * (i + j * edged_width),
+								edged_width);
+			transfer8x8_copy(cur->u + 8 * (i + j * edged_width/2),
+								refGMC->u + 8 * (i + j * edged_width/2),
+								edged_width / 2);
+			transfer8x8_copy(cur->v + 8 * (i + j * edged_width/2),
+								refGMC->v + 8 * (i + j * edged_width/2),
+								edged_width / 2);
+		} else
+*/
+		{
+		transfer16x16_copy(cur->y + 16 * (i + j * edged_width),
 							ref->y + 16 * (i + j * edged_width),
 							edged_width);
-		transfer8x8_copy(cur->u + 8 * (i + j * edged_width/2),
+		transfer8x8_copy(cur->u + 8 * (i + j * edged_width/2),
 						ref->u + 8 * (i + j * edged_width/2),
 						edged_width / 2);
-		transfer8x8_copy(cur->v + 8 * (i + j * edged_width/2),
+		transfer8x8_copy(cur->v + 8 * (i + j * edged_width/2),
 						ref->v + 8 * (i + j * edged_width/2),
 						edged_width / 2);
+		}
 		return;
 	}
 
-	if ((mb->mode == MODE_NOT_CODED || mb->mode == MODE_INTER || mb->mode == MODE_INTER_Q) /*&& !quarterpel*/) {
+	if ((mb->mode == MODE_NOT_CODED || mb->mode == MODE_INTER
+		|| mb->mode == MODE_INTER_Q) /*&& !quarterpel*/) {
 
-		/* quick MODE_NOT_CODED for GMC with MV!=(0,0) is still needed */
+		/* reduced resolution + GMC: not possible */
+
+		if (mb->mcsel) {
+
+			/* call normal routine once, easier than "if (mcsel)"ing all the time */
+
+			transfer_8to16sub(&dct_codes[0*64], cur->y + 16*j*edged_width + 16*i,
+								refGMC->y + 16*j*edged_width + 16*i, edged_width);
+			transfer_8to16sub(&dct_codes[1*64], cur->y + 16*j*edged_width + 16*i+8,
+								refGMC->y + 16*j*edged_width + 16*i+8, edged_width);
+			transfer_8to16sub(&dct_codes[2*64], cur->y + (16*j+8)*edged_width + 16*i,
+								refGMC->y + (16*j+8)*edged_width + 16*i, edged_width);
+			transfer_8to16sub(&dct_codes[3*64], cur->y + (16*j+8)*edged_width + 16*i+8,
+								refGMC->y + (16*j+8)*edged_width + 16*i+8, edged_width);
+
+/* luma is needed earlier for mode decision; chroma should be done block-based, but it isn't yet. */
+
+			transfer_8to16sub(&dct_codes[4 * 64], cur->u + 8 *j*edged_width/2 + 8*i,
+								refGMC->u + 8 *j*edged_width/2 + 8*i, edged_width/2);
+
+			transfer_8to16sub(&dct_codes[5 * 64], cur->v + 8*j* edged_width/2 + 8*i,
+								refGMC->v + 8*j* edged_width/2 + 8*i, edged_width/2);
+
+			return;
+		}
+
+		/* ordinary compensation */
+
+		dx = (quarterpel ? mb->qmvs[0].x : mb->mvs[0].x);
+		dy = (quarterpel ? mb->qmvs[0].y : mb->mvs[0].y);
 
 		if (reduced_resolution) {
 			dx = RRV_MV_SCALEUP(dx);
@@ -228,9 +323,10 @@
 		compensate16x16_interpolate(&dct_codes[0 * 64], cur->y, ref->y, refh->y,
 								refv->y, refhv->y, tmp, 16 * i, 16 * j, dx, dy,
 								edged_width, quarterpel, reduced_resolution, rounding);
-
-		dx /= 1 + quarterpel;
-		dy /= 1 + quarterpel;
+
+		dx /= (int)(1 + quarterpel);
+		dy /= (int)(1 + quarterpel);
+
 		dx = (dx >> 1) + roundtab_79[dx & 0x3];
 		dy = (dy >> 1) + roundtab_79[dy & 0x3];
@@ -259,7 +355,6 @@
 		CompensateChroma(dx, dy, i, j, cur, ref, tmp,
 						&dct_codes[4 * 64],
 						edged_width / 2, rounding, reduced_resolution);
-
 	}
 
@@ -328,14 +423,14 @@
 
 	if (quarterpel) {
-		if (dx&3 | dy&3) {
+		if ((dx&3) | (dy&3)) {
 			interpolate16x16_quarterpel(tmp - i * 16 - j * 16 * edged_width,
 										(uint8_t *) f_ref->y, tmp + 32,
 										tmp + 64, tmp + 96, 16*i, 16*j, dx, dy, edged_width, 0);
 			ptr1 = tmp;
 		} else ptr1 = f_ref->y + (16*j + dy/4)*edged_width + 16*i + dx/4; // fullpixel position
 
-		if (b_dx&3 | b_dy&3) {
+		if ((b_dx&3) | (b_dy&3)) {
 			interpolate16x16_quarterpel(tmp - i * 16 - j * 16 * edged_width + 16,
 										(uint8_t *) b_ref->y, tmp + 32,
 										tmp + 64, tmp + 96, 16*i, 16*j, b_dx, b_dy, edged_width, 0);
@@ -381,7 +476,7 @@
 			sumx += dx/2; sumy += dy/2;
 			b_sumx += b_dx/2; b_sumy += b_dy/2;
 
-			if (dx&3 | dy&3) {
+			if ((dx&3) | (dy&3)) {
 				interpolate8x8_quarterpel(tmp - (i * 16+(k&1)*8) - (j * 16+((k>>1)*8)) * edged_width,
 										(uint8_t *) f_ref->y,
 										tmp + 32, tmp + 64, tmp + 96,
@@ -389,7 +484,7 @@
 				ptr1 = tmp;
 			} else ptr1 = f_ref->y + (16*j + (k>>1)*8 + dy/4)*edged_width + 16*i + (k&1)*8 + dx/4;
 
-			if (b_dx&3 | b_dy&3) {
+			if ((b_dx&3) | (b_dy&3)) {
 				interpolate8x8_quarterpel(tmp - (i * 16+(k&1)*8) - (j * 16+((k>>1)*8)) * edged_width + 16,
 										(uint8_t *) b_ref->y,
 										tmp + 16, tmp + 32, tmp + 48,
@@ -436,3 +531,322 @@
 							dx, dy, edged_width / 2, 0),
 						edged_width / 2);
 }
+
+
+
+void
+generate_GMCparameters(	const int num_wp,				// [input]: number of warppoints
+						const int res,					// [input]: resolution
+						const WARPPOINTS *const warp,	// [input]: warp points
+						const int width, const int height,
+						GMC_DATA *const gmc)			// [output] precalculated parameters
+{
+
+/* We follow mainly two sources: The original standard, which is ugly, and the
+   thesis from Andreas Dehnhardt, which is much nicer.
+
+   Notation is: indices are written next to the variable,
+   primes in the standard are denoted by a suffix 'p'.
+   types are "c"=constant, "i"=input parameter, "f"=calculated, then fixed,
+   "o"=output data, " "=other, "u"=unused, "p"=calc for every pixel
+
+type | variable name | ISO name (TeX-style) | value or range  | usage
+-------------------------------------------------------------------------------------
+ c   | H             | H                    | [16 , ?]        | image height (w/o edges)
+ c   | W             | W                    | [16 , ?]        | image width (w/o edges)
+
+ c   | i0            | i_0                  | 0               | ref. point #1, X
+ c   | j0            | j_0                  | 0               | ref. point #1, Y
+ c   | i1            | i_1                  | W               | ref. point #2, X
+ c   | j1            | j_1                  | 0               | ref. point #2, Y
+ cu  | i2            | i_2                  | 0               | ref. point #3, X
+ cu  | j2            | j_2                  | H               | ref. point #3, Y
+
+ i   | du0           | du[0]                | [-16863,16863]  | warp vector #1, X
+ i   | dv0           | dv[0]                | [-16863,16863]  | warp vector #1, Y
+ i   | du1           | du[1]                | [-16863,16863]  | warp vector #2, X
+ i   | dv1           | dv[1]                | [-16863,16863]  | warp vector #2, Y
+ iu  | du2           | du[2]                | [-16863,16863]  | warp vector #3, X
+ iu  | dv2           | dv[2]                | [-16863,16863]  | warp vector #3, Y
+
+ i   | s             | s                    | {2,4,8,16}      | interpol. resolution
+ f   | sigma         | -                    | log2(s)         | X / s == X >> sigma
+ f   | r             | r                    | =16/s           | complementary res.
+ f   | rho           | \rho                 | log2(r)         | X / r == X >> rho
+
+ f   | i0s           | i'_0                 |                 |
+ f   | j0s           | j'_0                 |                 |
+ f   | i1s           | i'_1                 |                 |
+ f   | j1s           | j'_1                 |                 |
+ f   | i2s           | i'_2                 |                 |
+ f   | j2s           | j'_2                 |                 |
+
+ f   | alpha         | \alpha               |                 | 2^{alpha-1} < W <= 2^alpha
+ f   | beta          | \beta                |                 | 2^{beta-1} < H <= 2^beta
+
+ f   | Ws            | W'                   | W' = 2^{alpha}  | scaled width
+ f   | Hs            | H'                   | H' = 2^{beta}   | scaled height
+
+ f   | i1ss          | i''_1                | "virtual sprite stuff"
+ f   | j1ss          | j''_1                | "virtual sprite stuff"
+ f   | i2ss          | i''_2                | "virtual sprite stuff"
+ f   | j2ss          | j''_2                | "virtual sprite stuff"
+*/
+
+/* Some calculations are disabled because we only use 2 warppoints at the moment */
+
+	int du0 = warp->duv[0].x;
+	int dv0 = warp->duv[0].y;
+	int du1 = warp->duv[1].x;
+	int dv1 = warp->duv[1].y;
+//	int du2 = warp->duv[2].x;
+//	int dv2 = warp->duv[2].y;
+
+	gmc->num_wp = num_wp;
+
+	gmc->s = res;					/* scaling parameters 2,4,8 or 16 */
+	gmc->sigma = log2bin(res-1);	/* log2bin(15)=4, log2bin(16)=5, log2bin(17)=5 */
+	gmc->r = 16/res;
+	gmc->rho = 4 - gmc->sigma;		/* = log2bin(r-1) */
+
+	gmc->W = width;
+	gmc->H = height;				/* fixed reference coordinates */
+
+	gmc->alpha = log2bin(gmc->W-1);
+	gmc->Ws= 1<<gmc->alpha;
+
+//	gmc->beta = log2bin(gmc->H-1);
+//	gmc->Hs= 1<<gmc->beta;
+
+//	printf("du0=%d dv0=%d du1=%d dv1=%d s=%d sigma=%d W=%d alpha=%d, Ws=%d, rho=%d\n",du0,dv0,du1,dv1,gmc->s,gmc->sigma,gmc->W,gmc->alpha,gmc->Ws,gmc->rho);
+
+	/* i2s is only needed for num_wp >= 3, etc. */
+	/* the 's' values are in 1/s pel resolution */
+	gmc->i0s = res/2 * ( du0 );
+	gmc->j0s = res/2 * ( dv0 );
+	gmc->i1s = res/2 * (2*width + du1 + du0 );
+	gmc->j1s = res/2 * ( dv1 + dv0 );
+//	gmc->i2s = res/2 * ( du2 + du0 );
+//	gmc->j2s = res/2 * (2*height + dv2 + dv0 );
+
+	/* i2s and i2ss are only needed for num_wp == 3, etc. */
+
+	/* the 'ss' values are in 1/16 pel resolution */
+	gmc->i1ss = 16*gmc->Ws + ((gmc->W-gmc->Ws)*(gmc->r*gmc->i0s) + gmc->Ws*(gmc->r*gmc->i1s - 16*gmc->W)) / gmc->W;
+	gmc->j1ss = ((gmc->W - gmc->Ws)*(gmc->r*gmc->j0s) + gmc->Ws*gmc->r*gmc->j1s) / gmc->W;
+
+//	gmc->i2ss = ((gmc->H - gmc->Hs)*(gmc->r*gmc->i0s) + gmc->Hs*(gmc->r*gmc->i2s)) / gmc->H;
+//	gmc->j2ss = 16*gmc->Hs + ((gmc->H-gmc->Hs)*(gmc->r*gmc->j0s) + gmc->Ws*(gmc->r*gmc->j2s - 16*gmc->H)) / gmc->H;
+
+	return;
+}
+
+
+static VECTOR generate_GMCimageMB(const GMC_DATA *const gmc_data,
+								const IMAGE *const pRef,
+								const int mi, const int mj,
+								const int stride, const int stride2,
+								const int quarterpel, const int rounding,
+								IMAGE *const pGMC);
+
+
+void
+generate_GMCimage(	const GMC_DATA *const gmc_data,	// [input] precalculated data
+					const IMAGE *const pRef,		// [input]
+					const int mb_width,
+					const int mb_height,
+					const int stride,
+					const int stride2,
+					const int fcode,				// [input] some parameters...
+					const int32_t quarterpel,		// [input] for rounding avgMV
+					const int reduced_resolution,	// [input] ignored
+					const int32_t rounding,			// [input] for rounding image data
+					MACROBLOCK *const pMBs,			// [output] average motion vectors
+					IMAGE *const pGMC)				// [output] full warped image
+{
+
+	unsigned int mj,mi;
+	VECTOR avgMV;
+
+	for (mj=0;mj<mb_height;mj++)
+	for (mi=0;mi<mb_width;mi++)
+	{
+		avgMV = generate_GMCimageMB(gmc_data, pRef, mi, mj,
+									stride, stride2, quarterpel, rounding, pGMC);
+
+		pMBs[mj*mb_width+mi].amv.x = gmc_sanitize(avgMV.x, quarterpel, fcode);
+		pMBs[mj*mb_width+mi].amv.y = gmc_sanitize(avgMV.y, quarterpel, fcode);
+	}
+	return;
+}
+
+
+static VECTOR
+generate_GMCimageMB(const GMC_DATA *const gmc_data,
+					const IMAGE *const pRef,
+					const int mi, const int mj,
+					const int stride,
+					const int stride2,
+					const int quarterpel,
+					const int rounding,
+					IMAGE *const pGMC)
+{
+	const int W = gmc_data->W;
+	const int H = gmc_data->H;
+
+	const int s = gmc_data->s;
+	const int sigma = gmc_data->sigma;
+
+	const int r = gmc_data->r;
+	const int rho = gmc_data->rho;
+
+	const int i0s = gmc_data->i0s;
+	const int j0s = gmc_data->j0s;
+
+	const int i1ss = gmc_data->i1ss;
+	const int j1ss = gmc_data->j1ss;
+//	const int i2ss = gmc_data->i2ss;
+//	const int j2ss = gmc_data->j2ss;
+
+	const int alpha = gmc_data->alpha;
+	const int Ws = gmc_data->Ws;
+
+//	const int beta = gmc_data->beta;
+//	const int Hs = gmc_data->Hs;
+
+	int I,J;
+	VECTOR avgMV = {0,0};
+
+	for (J=16*mj;J<16*(mj+1);J++)
+		for (I=16*mi;I<16*(mi+1);I++)
+		{
+			int F= i0s + ( ((-r*i0s+i1ss)*I + (r*j0s-j1ss)*J + (1<<(alpha+rho-1))) >> (alpha+rho) );
+			int G= j0s + ( ((-r*j0s+j1ss)*I + (-r*i0s+i1ss)*J + (1<<(alpha+rho-1))) >> (alpha+rho) );
+
+/* this naive implementation (with lots of multiplications) isn't slower (rather faster) than
+   working incrementally. Don't ask me why... maybe the whole thing is memory bound? */
+
+			const int ri= F & (s-1);	// fractional part of pelwise MV X
+			const int rj= G & (s-1);	// fractional part of pelwise MV Y
+
+			int Y00,Y01,Y10,Y11;
+
+/* unclipped values are used for avgMV */
+			avgMV.x += F-(I<<sigma);
+			avgMV.y += G-(J<<sigma);
+
+			F >>= sigma;
+			G >>= sigma;
+
+/* clip values to be in range. Since we have edges, clip to 1 less than lower boundary;
+   this way positions F+1/G+1 are still right */
+
+			if (F< -1)
+				F=-1;
+			else if (F>W)
+				F=W;	/* W or W-1 doesn't matter, so save 1 subtraction ;-) */
+			if (G< -1)
+				G=-1;
+			else if (G>H)
+				G=H;	/* ditto */
+
+			Y00 = pRef->y[ G*stride + F ];			// luma values
+			Y01 = pRef->y[ G*stride + F+1 ];
+			Y10 = pRef->y[ G*stride + F+stride ];
+			Y11 = pRef->y[ G*stride + F+stride+1 ];
+
+			/* bilinear interpolation */
+			Y00 = ((s-ri)*Y00 + ri*Y01);
+			Y10 = ((s-ri)*Y10 + ri*Y11);
+			Y00 = ((s-rj)*Y00 + rj*Y10 + s*s/2 - rounding ) >> (sigma+sigma);
+
+			pGMC->y[J*stride+I] = (uint8_t)Y00;		/* output 1 Y-pixel */
+		}
+
+
+/* doing chroma _here_ is even more stupid and slow, because it won't be used until compensation and
+   most likely not even then (only if the block really _is_ GMC)
+*/
+
+	for (J=8*mj;J<8*(mj+1);J++)		/* this plays the role of j_c,i_c in the standard */
+	for (I=8*mi;I<8*(mi+1);I++)		/* for I_c we have to use I_c = 4*i_c+1 ! */
+	{
+	/* same positions for both chroma components, U=Cb and V=Cr */
+		int Fc=((-r*i0s+i1ss)*(4*I+1) + (r*j0s-j1ss)*(4*J+1) +2*Ws*r*i0s
+				-16*Ws +(1<<(alpha+rho+1)))>>(alpha+rho+2);
+		int Gc=((-r*j0s+j1ss)*(4*I+1) +(-r*i0s+i1ss)*(4*J+1) +2*Ws*r*j0s
+				-16*Ws +(1<<(alpha+rho+1))) >>(alpha+rho+2);
+
+		const int ri= Fc & (s-1);	// fractional part of pelwise MV X
+		const int rj= Gc & (s-1);	// fractional part of pelwise MV Y
+
+		int C00,C01,C10,C11;
+
+		Fc >>= sigma;
+		Gc >>= sigma;
+
+		if (Fc< -1)
+			Fc=-1;
+		else if (Fc>=W/2)
+			Fc=W/2;		/* W or W-1 doesn't matter, so save 1 subtraction ;-) */
+		if (Gc< -1)
+			Gc=-1;
+		else if (Gc>=H/2)
+			Gc=H/2;		/* ditto */
+
+/* now calculate U data */
+		C00 = pRef->u[ Gc*stride2 + Fc ];		// chroma-value Cb
+		C01 = pRef->u[ Gc*stride2 + Fc+1 ];
+		C10 = pRef->u[ (Gc+1)*stride2 + Fc ];
+		C11 = pRef->u[ (Gc+1)*stride2 + Fc+1 ];
+
+		/* bilinear interpolation */
+		C00 = ((s-ri)*C00 + ri*C01);
+		C10 = ((s-ri)*C10 + ri*C11);
+		C00 = ((s-rj)*C00 + rj*C10 + s*s/2 - rounding ) >> (sigma+sigma);
+
+		pGMC->u[J*stride2+I] = (uint8_t)C00;	/* output 1 U-pixel */
+
+/* now calculate V data */
+		C00 = pRef->v[ Gc*stride2 + Fc ];		// chroma-value Cr
+		C01 = pRef->v[ Gc*stride2 + Fc+1 ];
+		C10 = pRef->v[ (Gc+1)*stride2 + Fc ];
+		C11 = pRef->v[ (Gc+1)*stride2 + Fc+1 ];
+
+		/* bilinear interpolation */
+		C00 = ((s-ri)*C00 + ri*C01);
+		C10 = ((s-ri)*C10 + ri*C11);
+		C00 = ((s-rj)*C00 + rj*C10 + s*s/2 - rounding ) >> (sigma+sigma);
+
+		pGMC->v[J*stride2+I] = (uint8_t)C00;	/* output 1 V-pixel */
+	}
+
+
+
+/* The average vector is rounded from 1/s-pel to 1/2- or 1/4-pel */
+	if (quarterpel)
+	{	/* >>8 because of 256 terms in sum, >>(sigma-2) to obtain 1/4-pel */
+		avgMV.x = ( (avgMV.x + (1<<(sigma+5)) )>>(sigma+6) );
+		avgMV.y = ( (avgMV.y + (1<<(sigma+5)) )>>(sigma+6) );
+	}
+	else
+	{	/* >>8 because of 256 terms in sum, >>(sigma-1) to obtain 1/2-pel */
+		avgMV.x = ( (avgMV.x + (1<<(sigma+6)))>>(sigma+7) );
+		avgMV.y = ( (avgMV.y + (1<<(sigma+6)))>>(sigma+7) );
+	}	/* TODO: Check if this is correct way of rounding */
+
+	return avgMV;	/* clipping to fcode area is done outside! */
+}
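
The warp arithmetic in the patch is easier to check in isolation than inside the encoder. The following is a minimal standalone sketch, not part of the patch: it mirrors only the two-warp-point luma path (parameter setup as in generate_GMCparameters, per-pel warp and bilinear fetch as in the loop above) on a tiny gradient plane with no edge padding and rounding fixed to 0. All names introduced here (demo_log2bin, REF_W, REF_H) and the sample numbers are illustrative assumptions.

/* demo_gmc_warp.c -- sketch of the two-warp-point warp + bilinear fetch */
#include <stdio.h>
#include <stdint.h>

#define REF_W 16            /* tiny reference plane, no edge padding */
#define REF_H 16

static int demo_log2bin(unsigned v) { int n = 0; while (v) { v >>= 1; n++; } return n; }

int main(void)
{
    uint8_t ref[REF_H][REF_W];
    int x, y;

    /* fill the reference with a gradient so the warped sample is easy to check */
    for (y = 0; y < REF_H; y++)
        for (x = 0; x < REF_W; x++)
            ref[y][x] = (uint8_t)(16*y + x);

    {
        const int W = REF_W, s = 4;                 /* 1/4-pel warp accuracy */
        const int sigma = demo_log2bin(s-1);        /* = 2 */
        const int r = 16/s, rho = 4 - sigma;        /* complementary resolution */
        const int du0 = 2, dv0 = -2, du1 = 4, dv1 = 0;  /* warp vectors, half-pel units */

        /* reference-point offsets in 1/s pel, as in generate_GMCparameters */
        const int i0s = s/2 * du0;
        const int j0s = s/2 * dv0;
        const int i1s = s/2 * (2*W + du1 + du0);
        const int j1s = s/2 * (dv1 + dv0);

        /* "virtual sprite" points in 1/16 pel */
        const int alpha = demo_log2bin(W-1);
        const int Ws = 1 << alpha;
        const int i1ss = 16*Ws + ((W-Ws)*(r*i0s) + Ws*(r*i1s - 16*W)) / W;
        const int j1ss = ((W-Ws)*(r*j0s) + Ws*r*j1s) / W;

        /* warp one luma position (I,J), then fetch it with bilinear weights */
        const int I = 5, J = 7;
        int F = i0s + (((-r*i0s+i1ss)*I + ( r*j0s-j1ss)*J + (1<<(alpha+rho-1))) >> (alpha+rho));
        int G = j0s + (((-r*j0s+j1ss)*I + (-r*i0s+i1ss)*J + (1<<(alpha+rho-1))) >> (alpha+rho));
        const int ri = F & (s-1), rj = G & (s-1);   /* fractional parts in 1/s pel */
        int Y00, Y01, Y10, Y11;

        F >>= sigma;  G >>= sigma;                  /* integer sample position */
        if (F < 0) F = 0; else if (F > REF_W-2) F = REF_W-2;   /* no edges here, so clamp hard */
        if (G < 0) G = 0; else if (G > REF_H-2) G = REF_H-2;

        Y00 = ref[G][F];   Y01 = ref[G][F+1];
        Y10 = ref[G+1][F]; Y11 = ref[G+1][F+1];
        Y00 = (s-ri)*Y00 + ri*Y01;                  /* bilinear interpolation, rounding = 0 */
        Y10 = (s-ri)*Y10 + ri*Y11;
        Y00 = ((s-rj)*Y00 + rj*Y10 + s*s/2) >> (sigma+sigma);

        printf("pel (%d,%d) warps to (%d + %d/%d, %d + %d/%d) -> %d\n",
               I, J, F, ri, s, G, rj, s, Y00);
    }
    return 0;
}

With the gradient fill, the printed value can be checked by hand: position (5,7) warps to roughly (6 + 3/4, 7), and 16*7 + 6.75 rounds to 119, which is what the program prints.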