/*
 * GET_REFERENCE(X, Y, REF)
 *
 * Select the interpolated reference plane matching the half-pel motion
 * vector (X, Y) and store a pointer to the referenced pixel into REF.
 *
 * The low bits of X and Y encode the sub-pel phase:
 *   x even, y even -> Ref   (integer-pel plane)
 *   x even, y odd  -> RefV  (vertically interpolated plane)
 *   x odd,  y even -> RefH  (horizontally interpolated plane)
 *   x odd,  y odd  -> RefHV (diagonally interpolated plane)
 * The coordinate is then converted to full-pel units (divide by 2 after
 * removing the odd bit) and turned into an offset with data->iEdgedWidth
 * as the row stride.
 *
 * The planes are stored as const pointers in `data`; the cast to
 * (uint8_t *) lets REF be a plain writable pointer variable at every
 * call site, so callers no longer need their own (const uint8_t *) casts.
 * Requires a variable named `data` (SearchData *) in the calling scope.
 * Wrapped in do { } while(0) so the macro is safe as a single statement
 * (e.g. in an unbraced if/else).
 */
#define GET_REFERENCE(X, Y, REF) \
	do { \
		switch ( (((X)&1)<<1) + ((Y)&1) ) { \
		case 0 : REF = (uint8_t *)data->Ref + (X)/2 + ((Y)/2)*(data->iEdgedWidth); break; \
		case 1 : REF = (uint8_t *)data->RefV + (X)/2 + (((Y)-1)/2)*(data->iEdgedWidth); break; \
		case 2 : REF = (uint8_t *)data->RefH + ((X)-1)/2 + ((Y)/2)*(data->iEdgedWidth); break; \
		default : REF = (uint8_t *)data->RefHV + ((X)-1)/2 + (((Y)-1)/2)*(data->iEdgedWidth); break; \
		} \
	} while (0)
64 |
|
|
110 |
|
|
111 |
data->temp[0] = sad16v(data->Cur, Reference, data->iEdgedWidth, data->temp + 1); |
data->temp[0] = sad16v(data->Cur, Reference, data->iEdgedWidth, data->temp + 1); |
112 |
|
|
|
if(data->quarterpel) |
|
|
t = d_mv_bits(2*x - data->predQMV.x, 2*y - data->predQMV.y, data->iFcode); |
|
|
else |
|
113 |
t = d_mv_bits(x - data->predMV.x, y - data->predMV.y, data->iFcode); |
t = d_mv_bits(x - data->predMV.x, y - data->predMV.y, data->iFcode); |
114 |
|
data->temp[0] += data->lambda16 * t; |
115 |
data->temp[0] += lambda_vec16[data->iQuant] * t; |
data->temp[1] += data->lambda8 * t; |
|
data->temp[1] += lambda_vec8[data->iQuant] * t; |
|
116 |
|
|
117 |
if (data->temp[0] < data->iMinSAD[0]) { |
if (data->temp[0] < data->iMinSAD[0]) { |
118 |
data->iMinSAD[0] = data->temp[0]; |
data->iMinSAD[0] = data->temp[0]; |
147 |
default : Reference = data->RefHV + (x-1)/2 + ((y-1)/2)*(data->iEdgedWidth); break; |
default : Reference = data->RefHV + (x-1)/2 + ((y-1)/2)*(data->iEdgedWidth); break; |
148 |
} |
} |
149 |
|
|
150 |
if(data->quarterpel) |
sad = data->lambda16 * d_mv_bits(x - data->predMV.x, y - data->predMV.y, data->iFcode); |
|
sad = lambda_vec16[data->iQuant] * |
|
|
d_mv_bits(2*x - data->predQMV.x, 2*y - data->predQMV.y, data->iFcode); |
|
|
else |
|
|
sad = lambda_vec16[data->iQuant] * |
|
|
d_mv_bits(x - data->predMV.x, y - data->predMV.y, data->iFcode); |
|
|
|
|
151 |
sad += sad16(data->Cur, Reference, data->iEdgedWidth, MV_MAX_ERROR); |
sad += sad16(data->Cur, Reference, data->iEdgedWidth, MV_MAX_ERROR); |
152 |
|
|
153 |
if (sad < *(data->iMinSAD)) { |
if (sad < *(data->iMinSAD)) { |
177 |
switch( ((x&1)<<1) + (y&1) ) |
switch( ((x&1)<<1) + (y&1) ) |
178 |
{ |
{ |
179 |
case 0: // pure halfpel position - shouldn't happen during a refinement step |
case 0: // pure halfpel position - shouldn't happen during a refinement step |
180 |
GET_REFERENCE(halfpelMV.x, halfpelMV.y, (const uint8_t *) Reference); |
GET_REFERENCE(halfpelMV.x, halfpelMV.y, Reference); |
181 |
break; |
break; |
182 |
|
|
183 |
case 1: // x halfpel, y qpel - top or bottom during qpel refinement |
case 1: // x halfpel, y qpel - top or bottom during qpel refinement |
217 |
data->temp[0] = sad16v(data->Cur, Reference, data->iEdgedWidth, data->temp+1); |
data->temp[0] = sad16v(data->Cur, Reference, data->iEdgedWidth, data->temp+1); |
218 |
|
|
219 |
t = d_mv_bits(x - data->predQMV.x, y - data->predQMV.y, data->iFcode); |
t = d_mv_bits(x - data->predQMV.x, y - data->predQMV.y, data->iFcode); |
220 |
data->temp[0] += lambda_vec16[data->iQuant] * t; |
data->temp[0] += data->lambda16 * t; |
221 |
data->temp[1] += lambda_vec8[data->iQuant] * t; |
data->temp[1] += data->lambda8 * t; |
222 |
|
|
223 |
if (data->temp[0] < data->iMinSAD[0]) { |
if (data->temp[0] < data->iMinSAD[0]) { |
224 |
data->iMinSAD[0] = data->temp[0]; |
data->iMinSAD[0] = data->temp[0]; |
256 |
switch( ((x&1)<<1) + (y&1) ) |
switch( ((x&1)<<1) + (y&1) ) |
257 |
{ |
{ |
258 |
case 0: // pure halfpel position - shouldn't happen during a refinement step |
case 0: // pure halfpel position - shouldn't happen during a refinement step |
259 |
GET_REFERENCE(halfpelMV.x, halfpelMV.y, (const uint8_t *) Reference); |
GET_REFERENCE(halfpelMV.x, halfpelMV.y, Reference); |
260 |
break; |
break; |
261 |
|
|
262 |
case 1: // x halfpel, y qpel - top or bottom during qpel refinement |
case 1: // x halfpel, y qpel - top or bottom during qpel refinement |
293 |
break; |
break; |
294 |
} |
} |
295 |
|
|
296 |
sad = lambda_vec16[data->iQuant] * |
sad = data->lambda16 * |
297 |
d_mv_bits(x - data->predQMV.x, y - data->predQMV.y, data->iFcode); |
d_mv_bits(x - data->predQMV.x, y - data->predQMV.y, data->iFcode); |
298 |
sad += sad16(data->Cur, Reference, data->iEdgedWidth, MV_MAX_ERROR); |
sad += sad16(data->Cur, Reference, data->iEdgedWidth, MV_MAX_ERROR); |
299 |
|
|
312 |
if (( x > data->max_dx) || ( x < data->min_dx) |
if (( x > data->max_dx) || ( x < data->min_dx) |
313 |
|| ( y > data->max_dy) || (y < data->min_dy)) return; |
|| ( y > data->max_dy) || (y < data->min_dy)) return; |
314 |
|
|
315 |
sad = lambda_vec16[data->iQuant] * |
sad = sad16(data->Cur, data->Ref + x/2 + (y/2)*(data->iEdgedWidth), |
|
d_mv_bits(x - data->predMV.x, y - data->predMV.y, data->iFcode); |
|
|
|
|
|
sad += sad16(data->Cur, data->Ref + x/2 + (y/2)*(data->iEdgedWidth), |
|
316 |
data->iEdgedWidth, 256*4096); |
data->iEdgedWidth, 256*4096); |
317 |
|
|
318 |
if (sad < *(data->iMinSAD)) { |
if (sad < *(data->iMinSAD)) { |
347 |
default : ReferenceB = data->bRefHV + (xb-1)/2 + ((yb-1)/2)*(data->iEdgedWidth); break; |
default : ReferenceB = data->bRefHV + (xb-1)/2 + ((yb-1)/2)*(data->iEdgedWidth); break; |
348 |
} |
} |
349 |
|
|
350 |
sad = lambda_vec16[data->iQuant] * |
sad = data->lambda16 * |
351 |
( d_mv_bits(xf - data->predMV.x, yf - data->predMV.y, data->iFcode) + |
( d_mv_bits(xf - data->predMV.x, yf - data->predMV.y, data->iFcode) + |
352 |
d_mv_bits(xb - data->bpredMV.x, yb - data->bpredMV.y, data->iFcode) ); |
d_mv_bits(xb - data->bpredMV.x, yb - data->bpredMV.y, data->iFcode) ); |
353 |
|
|
370 |
|
|
371 |
if (( x > 31) || ( x < -32) || ( y > 31) || (y < -32)) return; |
if (( x > 31) || ( x < -32) || ( y > 31) || (y < -32)) return; |
372 |
|
|
373 |
sad = lambda_vec16[data->iQuant] * d_mv_bits(x, y, 1); |
sad = data->lambda16 * d_mv_bits(x, y, 1); |
374 |
|
|
375 |
for (k = 0; k < 4; k++) { |
for (k = 0; k < 4; k++) { |
376 |
mvs.x = data->directmvF[k].x + x; |
mvs.x = data->directmvF[k].x + x; |
425 |
|
|
426 |
if (( x > 31) || ( x < -32) || ( y > 31) || (y < -32)) return; |
if (( x > 31) || ( x < -32) || ( y > 31) || (y < -32)) return; |
427 |
|
|
428 |
sad = lambda_vec16[data->iQuant] * d_mv_bits(x, y, 1); |
sad = data->lambda16 * d_mv_bits(x, y, 1); |
429 |
|
|
430 |
mvs.x = data->directmvF[0].x + x; |
mvs.x = data->directmvF[0].x + x; |
431 |
b_mvs.x = ((x == 0) ? |
b_mvs.x = ((x == 0) ? |
482 |
} |
} |
483 |
|
|
484 |
sad = sad8(data->Cur, Reference, data->iEdgedWidth); |
sad = sad8(data->Cur, Reference, data->iEdgedWidth); |
485 |
|
sad += data->lambda8 * d_mv_bits(x - data->predMV.x, y - data->predMV.y, data->iFcode); |
|
if(data->quarterpel) |
|
|
sad += lambda_vec8[data->iQuant] * d_mv_bits(2*x - data->predQMV.x, 2*y - data->predQMV.y, data->iFcode); |
|
|
else |
|
|
sad += lambda_vec8[data->iQuant] * d_mv_bits(x - data->predMV.x, y - data->predMV.y, data->iFcode); |
|
486 |
|
|
487 |
if (sad < *(data->iMinSAD)) { |
if (sad < *(data->iMinSAD)) { |
488 |
*(data->iMinSAD) = sad; |
*(data->iMinSAD) = sad; |
511 |
switch( ((x&1)<<1) + (y&1) ) |
switch( ((x&1)<<1) + (y&1) ) |
512 |
{ |
{ |
513 |
case 0: // pure halfpel position - shouldn't happen during a refinement step |
case 0: // pure halfpel position - shouldn't happen during a refinement step |
514 |
GET_REFERENCE(halfpelMV.x, halfpelMV.y, (const uint8_t *) Reference); |
GET_REFERENCE(halfpelMV.x, halfpelMV.y, Reference); |
515 |
break; |
break; |
516 |
|
|
517 |
case 1: // x halfpel, y qpel - top or bottom during qpel refinement |
case 1: // x halfpel, y qpel - top or bottom during qpel refinement |
540 |
} |
} |
541 |
|
|
542 |
sad = sad8(data->Cur, Reference, data->iEdgedWidth); |
sad = sad8(data->Cur, Reference, data->iEdgedWidth); |
543 |
sad += lambda_vec8[data->iQuant] * d_mv_bits(x - data->predQMV.x, y - data->predQMV.y, data->iFcode); |
sad += data->lambda8 * d_mv_bits(x - data->predQMV.x, y - data->predQMV.y, data->iFcode); |
544 |
|
|
545 |
if (sad < *(data->iMinSAD)) { |
if (sad < *(data->iMinSAD)) { |
546 |
*(data->iMinSAD) = sad; |
*(data->iMinSAD) = sad; |
804 |
Data.temp = temp; |
Data.temp = temp; |
805 |
Data.iFcode = current->fcode; |
Data.iFcode = current->fcode; |
806 |
Data.rounding = pParam->m_rounding_type; |
Data.rounding = pParam->m_rounding_type; |
|
Data.quarterpel = pParam->m_quarterpel; |
|
807 |
|
|
808 |
if((qimage = (uint8_t *) malloc(32 * pParam->edged_width)) == NULL) |
if((qimage = (uint8_t *) malloc(32 * pParam->edged_width)) == NULL) |
809 |
return 1; // allocate some mem for qpel interpolated blocks |
return 1; // allocate some mem for qpel interpolated blocks |
823 |
|
|
824 |
if (!(current->global_flags & XVID_LUMIMASKING)) { |
if (!(current->global_flags & XVID_LUMIMASKING)) { |
825 |
pMB->dquant = NO_CHANGE; |
pMB->dquant = NO_CHANGE; |
826 |
pMB->quant = current->quant; } |
pMB->quant = current->quant; |
827 |
else |
} else { |
828 |
if (pMB->dquant != NO_CHANGE) { |
if (pMB->dquant != NO_CHANGE) { |
829 |
quant += DQtab[pMB->dquant]; |
quant += DQtab[pMB->dquant]; |
830 |
if (quant > 31) quant = 31; |
if (quant > 31) quant = 31; |
831 |
else if (quant < 1) quant = 1; |
else if (quant < 1) quant = 1; |
832 |
|
} |
833 |
pMB->quant = quant; |
pMB->quant = quant; |
834 |
} |
} |
835 |
|
|
836 |
//initial skip decision |
//initial skip decision |
837 |
|
|
838 |
if ((pMB->dquant == NO_CHANGE) && (sad00 <= MAX_SAD00_FOR_SKIP * pMB->quant) |
if (pMB->dquant == NO_CHANGE && sad00 < pMB->quant * INITIAL_SKIP_THRESH) |
839 |
&& (SkipDecisionP(pCurrent, pRef, x, y, pParam->edged_width, pMB->quant)) ) { |
if (SkipDecisionP(pCurrent, pRef, x, y, pParam->edged_width, pMB->quant)) { |
|
if (pMB->sad16 < pMB->quant * INITIAL_SKIP_THRESH) { |
|
840 |
SkipMacroblockP(pMB, sad00); |
SkipMacroblockP(pMB, sad00); |
841 |
continue; |
continue; |
|
sad00 = 256 * 4096; |
|
842 |
} |
} |
|
} else sad00 = 256*4096; // skip not allowed - for final skip decision |
|
843 |
|
|
844 |
SearchP(pRef->y, pRefH->y, pRefV->y, pRefHV->y, qimage, pCurrent, x, |
SearchP(pRef->y, pRefH->y, pRefV->y, pRefHV->y, qimage, pCurrent, x, |
845 |
y, current->motion_flags, pMB->quant, |
y, current->motion_flags, pMB->quant, |
847 |
current->global_flags & XVID_INTER4V, pMB); |
current->global_flags & XVID_INTER4V, pMB); |
848 |
|
|
849 |
/* final skip decision, a.k.a. "the vector you found, really that good?" */ |
/* final skip decision, a.k.a. "the vector you found, really that good?" */ |
850 |
if (sad00 < pMB->quant * MAX_SAD00_FOR_SKIP) |
if (pMB->dquant == NO_CHANGE && sad00 < pMB->quant * MAX_SAD00_FOR_SKIP |
851 |
if ((100*pMB->sad16)/(sad00+1) > FINAL_SKIP_THRESH) |
&& ((100*pMB->sad16)/(sad00+1) > FINAL_SKIP_THRESH) ) |
852 |
{ SkipMacroblockP(pMB, sad00); continue; } |
if (SkipDecisionP(pCurrent, pRef, x, y, pParam->edged_width, pMB->quant)) { |
853 |
|
SkipMacroblockP(pMB, sad00); |
854 |
|
continue; |
855 |
|
} |
856 |
|
|
857 |
/* finally, intra decision */ |
/* finally, intra decision */ |
858 |
|
|
975 |
Data->RefHV = pRefHV + (x + Data->iEdgedWidth*y) * 16; |
Data->RefHV = pRefHV + (x + Data->iEdgedWidth*y) * 16; |
976 |
Data->RefQ = pRefQ; |
Data->RefQ = pRefQ; |
977 |
|
|
978 |
Data->iQuant = iQuant; |
Data->lambda16 = lambda_vec16[iQuant]; |
979 |
|
|
980 |
if (!(MotionFlags & PMV_HALFPEL16)) { |
if (!(MotionFlags & PMV_HALFPEL16)) { |
981 |
Data->min_dx = EVEN(Data->min_dx); |
Data->min_dx = EVEN(Data->min_dx); |
991 |
for(i = 0; i < 5; i++) |
for(i = 0; i < 5; i++) |
992 |
Data->currentMV[i].x = Data->currentMV[i].y = 0; |
Data->currentMV[i].x = Data->currentMV[i].y = 0; |
993 |
|
|
|
if(Data->quarterpel) |
|
|
i = d_mv_bits(Data->predQMV.x, Data->predQMV.y, Data->iFcode); |
|
|
else |
|
994 |
i = d_mv_bits(Data->predMV.x, Data->predMV.y, Data->iFcode); |
i = d_mv_bits(Data->predMV.x, Data->predMV.y, Data->iFcode); |
|
|
|
995 |
Data->iMinSAD[0] = pMB->sad16 + lambda_vec16[iQuant] * i; |
Data->iMinSAD[0] = pMB->sad16 + lambda_vec16[iQuant] * i; |
996 |
Data->iMinSAD[1] = pMB->sad8[0] + lambda_vec8[iQuant] * i; |
Data->iMinSAD[1] = pMB->sad8[0] + lambda_vec8[iQuant] * i; |
997 |
Data->iMinSAD[2] = pMB->sad8[1]; |
Data->iMinSAD[2] = pMB->sad8[1]; |
1080 |
else |
else |
1081 |
CheckCandidate = CheckCandidate16no4v_qpel; |
CheckCandidate = CheckCandidate16no4v_qpel; |
1082 |
|
|
1083 |
|
Data->iMinSAD[0] -= lambda_vec16[iQuant] * |
1084 |
|
d_mv_bits(Data->predMV.x - Data->currentMV[0].x, Data->predMV.y - Data->currentMV[0].y, Data->iFcode); |
1085 |
|
Data->iMinSAD[1] -= lambda_vec8[iQuant] * |
1086 |
|
d_mv_bits(Data->predMV.x - Data->currentMV[1].x, Data->predMV.y - Data->currentMV[1].y, Data->iFcode); |
1087 |
|
|
1088 |
|
Data->iMinSAD[0] += lambda_vec16[iQuant] * |
1089 |
|
d_mv_bits(Data->predQMV.x - Data->currentQMV[0].x, Data->predMV.y - Data->currentQMV[0].y, Data->iFcode); |
1090 |
|
Data->iMinSAD[1] += lambda_vec8[iQuant] * |
1091 |
|
d_mv_bits(Data->predQMV.x - Data->currentQMV[1].x, Data->predMV.y - Data->currentQMV[1].y, Data->iFcode); |
1092 |
|
|
1093 |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
1094 |
pParam->width, pParam->height, Data->iFcode, 0); // get real range |
pParam->width, pParam->height, Data->iFcode, 0); |
1095 |
|
|
1096 |
QuarterpelRefine(Data); |
QuarterpelRefine(Data); |
1097 |
} |
} |
1099 |
if (inter4v) { |
if (inter4v) { |
1100 |
SearchData Data8; |
SearchData Data8; |
1101 |
Data8.iFcode = Data->iFcode; |
Data8.iFcode = Data->iFcode; |
1102 |
Data8.iQuant = Data->iQuant; |
Data8.lambda8 = lambda_vec8[iQuant]; |
1103 |
Data8.iEdgedWidth = Data->iEdgedWidth; |
Data8.iEdgedWidth = Data->iEdgedWidth; |
1104 |
Search8(Data, 2*x, 2*y, MotionFlags, pParam, pMB, pMBs, 0, &Data8); |
Search8(Data, 2*x, 2*y, MotionFlags, pParam, pMB, pMBs, 0, &Data8); |
1105 |
Search8(Data, 2*x + 1, 2*y, MotionFlags, pParam, pMB, pMBs, 1, &Data8); |
Search8(Data, 2*x + 1, 2*y, MotionFlags, pParam, pMB, pMBs, 1, &Data8); |
1152 |
Data->iMinSAD = OldData->iMinSAD + 1 + block; |
Data->iMinSAD = OldData->iMinSAD + 1 + block; |
1153 |
Data->currentMV = OldData->currentMV + 1 + block; |
Data->currentMV = OldData->currentMV + 1 + block; |
1154 |
Data->currentQMV = OldData->currentQMV + 1 + block; |
Data->currentQMV = OldData->currentQMV + 1 + block; |
|
Data->quarterpel = OldData->quarterpel; |
|
1155 |
|
|
1156 |
if(Data->quarterpel) // add d_mv_bits[qpel] everywhere but not in 0 (it's already there) |
if(pParam->m_quarterpel) { |
1157 |
{ |
//it is qpel. substract d_mv_bits[qpel] from 0, add d_mv_bits[hpel] everywhere |
1158 |
if (block != 0) |
if (block == 0) |
1159 |
*(Data->iMinSAD) += lambda_vec8[Data->iQuant] * |
*(Data->iMinSAD) -= Data->lambda8 * |
1160 |
d_mv_bits(Data->currentQMV->x - Data->predQMV.x, |
d_mv_bits(Data->currentQMV->x - Data->predQMV.x, |
1161 |
Data->currentQMV->y - Data->predQMV.y, |
Data->currentQMV->y - Data->predQMV.y, |
1162 |
Data->iFcode); |
Data->iFcode); |
1163 |
|
|
1164 |
} else // add d_mv_bits[hpel] everywhere but not in 0 (it's already there) |
*(Data->iMinSAD) += Data->lambda8 * |
|
if (block != 0) |
|
|
*(Data->iMinSAD) += lambda_vec8[Data->iQuant] * |
|
1165 |
d_mv_bits(Data->currentMV->x - Data->predMV.x, |
d_mv_bits(Data->currentMV->x - Data->predMV.x, |
1166 |
Data->currentMV->y - Data->predMV.y, |
Data->currentMV->y - Data->predMV.y, |
1167 |
Data->iFcode); |
Data->iFcode); |
1168 |
|
} else //it is not qpel. add d_mv_bits[hpel] everywhere but not in 0 (it's already there) |
1169 |
|
if (block != 0) *(Data->iMinSAD) += Data->lambda8 * |
1170 |
|
d_mv_bits( Data->currentMV->x - Data->predMV.x, |
1171 |
|
Data->currentMV->y - Data->predMV.y, |
1172 |
|
Data->iFcode); |
1173 |
|
|
1174 |
|
|
1175 |
if (MotionFlags & (PMV_EXTSEARCH8|PMV_HALFPELREFINE8)) { |
if (MotionFlags & (PMV_EXTSEARCH8|PMV_HALFPELREFINE8)) { |
1176 |
|
|
1197 |
|
|
1198 |
(*MainSearchPtr)(Data->currentMV->x, Data->currentMV->y, Data, 255); |
(*MainSearchPtr)(Data->currentMV->x, Data->currentMV->y, Data, 255); |
1199 |
|
|
1200 |
if(*(Data->iMinSAD) < temp_sad) { //found a better match? |
if(*(Data->iMinSAD) < temp_sad) { |
1201 |
Data->currentQMV->x = 2 * Data->currentMV->x; // update our qpel vector |
Data->currentQMV->x = 2 * Data->currentMV->x; // update our qpel vector |
1202 |
Data->currentQMV->y = 2 * Data->currentMV->y; |
Data->currentQMV->y = 2 * Data->currentMV->y; |
1203 |
} |
} |
1214 |
} |
} |
1215 |
} |
} |
1216 |
|
|
1217 |
if((Data->quarterpel) && (!(Data->currentQMV->x & 1)) && (!(Data->currentQMV->y & 1)) && |
if(pParam->m_quarterpel) { |
1218 |
|
if((!(Data->currentQMV->x & 1)) && (!(Data->currentQMV->y & 1)) && |
1219 |
(MotionFlags & PMV_QUARTERPELREFINE8)) { |
(MotionFlags & PMV_QUARTERPELREFINE8)) { |
1220 |
|
|
1221 |
CheckCandidate = CheckCandidate8_qpel; |
CheckCandidate = CheckCandidate8_qpel; |
1222 |
|
Data->iMinSAD[0] -= Data->lambda8 * |
1223 |
|
d_mv_bits(Data->predMV.x - Data->currentMV[0].x, Data->predMV.y - Data->currentMV[0].y, Data->iFcode); |
1224 |
|
|
1225 |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 8, |
Data->iMinSAD[0] += Data->lambda8 * |
1226 |
pParam->width, pParam->height, OldData->iFcode, 0); // get real range |
d_mv_bits(Data->predQMV.x - Data->currentQMV[0].x, Data->predQMV.y - Data->currentQMV[0].y, Data->iFcode); |
1227 |
|
|
1228 |
QuarterpelRefine(Data); |
QuarterpelRefine(Data); |
1229 |
} |
} |
1230 |
} |
} |
1231 |
|
} |
1232 |
|
|
1233 |
if(pParam->m_quarterpel) { |
if(pParam->m_quarterpel) { |
1234 |
pMB->pmvs[block].x = Data->currentQMV->x - Data->predQMV.x; |
pMB->pmvs[block].x = Data->currentQMV->x - Data->predQMV.x; |
1242 |
pMB->mvs[block] = *(Data->currentMV); |
pMB->mvs[block] = *(Data->currentMV); |
1243 |
pMB->qmvs[block] = *(Data->currentQMV); |
pMB->qmvs[block] = *(Data->currentQMV); |
1244 |
|
|
1245 |
pMB->sad8[block] = 4 * (*Data->iMinSAD); // Isibaar: why? |
pMB->sad8[block] = 4 * (*Data->iMinSAD); |
1246 |
} |
} |
1247 |
|
|
1248 |
/* B-frames code starts here */ |
/* B-frames code starts here */ |
1355 |
|
|
1356 |
// three bits are needed to code backward mode. four for forward |
// three bits are needed to code backward mode. four for forward |
1357 |
// we treat the bits just like they were vectors |
// we treat the bits just like they were vectors |
1358 |
if (mode_current == MODE_FORWARD) *Data->iMinSAD += 4 * lambda_vec16[Data->iQuant]; |
if (mode_current == MODE_FORWARD) *Data->iMinSAD += 4 * Data->lambda16 * 2; |
1359 |
else *Data->iMinSAD += 3 * lambda_vec16[Data->iQuant]; |
else *Data->iMinSAD += 3 * Data->lambda16 * 2; |
|
|
|
1360 |
|
|
1361 |
if (*Data->iMinSAD < *best_sad) { |
if (*Data->iMinSAD < *best_sad) { |
1362 |
*best_sad = *Data->iMinSAD; |
*best_sad = *Data->iMinSAD; |
1363 |
pMB->mode = mode_current; |
pMB->mode = mode_current; |
1364 |
pMB->pmvs[0].x = Data->currentMV->x - predMV->x; |
pMB->pmvs[0].x = Data->currentMV->x - predMV->x; |
1365 |
pMB->pmvs[0].y = Data->currentMV->y - predMV->y; |
pMB->pmvs[0].y = Data->currentMV->y - predMV->y; |
1366 |
if (mode_current == MODE_FORWARD) pMB->mvs[0] = *Data->currentMV; |
if (mode_current == MODE_FORWARD) pMB->mvs[0] = *(Data->currentMV+2) = *Data->currentMV; |
1367 |
else pMB->b_mvs[0] = *Data->currentMV; |
else pMB->b_mvs[0] = *(Data->currentMV+1) = *Data->currentMV; //we store currmv for interpolate search |
1368 |
} |
} |
1369 |
|
|
1370 |
} |
} |
1441 |
(*CheckCandidate)(0, 0, 255, &k, Data); |
(*CheckCandidate)(0, 0, 255, &k, Data); |
1442 |
|
|
1443 |
// skip decision |
// skip decision |
1444 |
if (*Data->iMinSAD - 2 * lambda_vec16[Data->iQuant] < (int32_t)Data->iQuant * SKIP_THRESH_B) { |
if (*Data->iMinSAD - 2 * Data->lambda16 < (uint32_t)pMB->quant * SKIP_THRESH_B) { |
1445 |
//possible skip - checking chroma. everything copied from MC |
//possible skip - checking chroma. everything copied from MC |
1446 |
//this is not full chroma compensation, only its fullpel approximation. should work though |
//this is not full chroma compensation, only its fullpel approximation. should work though |
1447 |
int sum, dx, dy, b_dx, b_dy; |
int sum, dx, dy, b_dx, b_dy; |
1467 |
b_Ref->v + (y*8 + b_dy/2) * (Data->iEdgedWidth/2) + x*8 + b_dx/2, |
b_Ref->v + (y*8 + b_dy/2) * (Data->iEdgedWidth/2) + x*8 + b_dx/2, |
1468 |
Data->iEdgedWidth/2); |
Data->iEdgedWidth/2); |
1469 |
|
|
1470 |
if ((uint32_t) sum < MAX_CHROMA_SAD_FOR_SKIP * Data->iQuant) { |
if (sum < MAX_CHROMA_SAD_FOR_SKIP * pMB->quant) { |
1471 |
pMB->mode = MODE_DIRECT_NONE_MV; |
pMB->mode = MODE_DIRECT_NONE_MV; |
1472 |
return *Data->iMinSAD; |
return *Data->iMinSAD; |
1473 |
} |
} |
1486 |
|
|
1487 |
HalfpelRefine(Data); |
HalfpelRefine(Data); |
1488 |
|
|
1489 |
*Data->iMinSAD += 1 * lambda_vec16[Data->iQuant]; // one bit is needed to code direct mode. we treat this bit just like it was vector's |
*Data->iMinSAD += 1 * Data->lambda16 * 2; // one bit is needed to code direct mode |
1490 |
*best_sad = *Data->iMinSAD; |
*best_sad = *Data->iMinSAD; |
1491 |
|
|
1492 |
if (b_mb->mode == MODE_INTER4V) |
if (b_mb->mode == MODE_INTER4V) |
1542 |
int iDirection, i, j; |
int iDirection, i, j; |
1543 |
SearchData bData; |
SearchData bData; |
1544 |
|
|
1545 |
bData.iMinSAD = fData->iMinSAD; |
*(bData.iMinSAD = fData->iMinSAD) = 4096*256; |
|
*bData.iMinSAD = 4096*256; |
|
1546 |
bData.Cur = fData->Cur; |
bData.Cur = fData->Cur; |
1547 |
fData->iEdgedWidth = bData.iEdgedWidth = iEdgedWidth; |
fData->iEdgedWidth = bData.iEdgedWidth = iEdgedWidth; |
1548 |
bData.currentMV = fData->currentMV + 1; |
bData.currentMV = fData->currentMV + 1; |
1549 |
bData.iQuant = fData->iQuant; |
bData.lambda16 = fData->lambda16; |
1550 |
fData->iFcode = bData.bFcode = fcode; fData->bFcode = bData.iFcode = bcode; |
fData->iFcode = bData.bFcode = fcode; fData->bFcode = bData.iFcode = bcode; |
1551 |
|
|
1552 |
bData.bRef = fData->Ref = f_Ref + (x + y * iEdgedWidth) * 16; |
bData.bRef = fData->Ref = f_Ref + (x + y * iEdgedWidth) * 16; |
1561 |
bData.bpredMV = fData->predMV = *f_predMV; |
bData.bpredMV = fData->predMV = *f_predMV; |
1562 |
fData->bpredMV = bData.predMV = *b_predMV; |
fData->bpredMV = bData.predMV = *b_predMV; |
1563 |
|
|
1564 |
fData->currentMV[0] = pMB->mvs[0]; |
fData->currentMV[0] = fData->currentMV[3]; //forward search stored it's vector here. backward stored it in the place it's needed |
|
fData->currentMV[1] = pMB->b_mvs[0]; |
|
1565 |
get_range(&fData->min_dx, &fData->max_dx, &fData->min_dy, &fData->max_dy, x, y, 16, pParam->width, pParam->height, fcode, pParam->m_quarterpel); |
get_range(&fData->min_dx, &fData->max_dx, &fData->min_dy, &fData->max_dy, x, y, 16, pParam->width, pParam->height, fcode, pParam->m_quarterpel); |
1566 |
get_range(&bData.min_dx, &bData.max_dx, &bData.min_dy, &bData.max_dy, x, y, 16, pParam->width, pParam->height, bcode, pParam->m_quarterpel); |
get_range(&bData.min_dx, &bData.max_dx, &bData.min_dy, &bData.max_dy, x, y, 16, pParam->width, pParam->height, bcode, pParam->m_quarterpel); |
1567 |
|
|
1601 |
} while (!(iDirection)); |
} while (!(iDirection)); |
1602 |
|
|
1603 |
// two bits are needed to code interpolate mode. we treat the bits just like they were vectors |
// two bits are needed to code interpolate mode. we treat the bits just like they were vectors |
1604 |
*fData->iMinSAD += 2 * lambda_vec16[fData->iQuant]; |
*fData->iMinSAD += 2 * fData->lambda16 * 2; |
1605 |
if (*fData->iMinSAD < *best_sad) { |
if (*fData->iMinSAD < *best_sad) { |
1606 |
*best_sad = *fData->iMinSAD; |
*best_sad = *fData->iMinSAD; |
1607 |
pMB->mvs[0] = fData->currentMV[0]; |
pMB->mvs[0] = fData->currentMV[0]; |
1652 |
Data.iEdgedWidth = pParam->edged_width; |
Data.iEdgedWidth = pParam->edged_width; |
1653 |
Data.currentMV = currentMV; |
Data.currentMV = currentMV; |
1654 |
Data.iMinSAD = &iMinSAD; |
Data.iMinSAD = &iMinSAD; |
1655 |
Data.iQuant = frame->quant; |
Data.lambda16 = lambda_vec16[frame->quant]; |
1656 |
|
|
1657 |
// note: i==horizontal, j==vertical |
// note: i==horizontal, j==vertical |
1658 |
|
|
1671 |
} |
} |
1672 |
|
|
1673 |
Data.Cur = frame->image.y + (j * Data.iEdgedWidth + i) * 16; |
Data.Cur = frame->image.y + (j * Data.iEdgedWidth + i) * 16; |
1674 |
|
pMB->quant = frame->quant; |
1675 |
/* direct search comes first, because it (1) checks for SKIP-mode |
/* direct search comes first, because it (1) checks for SKIP-mode |
1676 |
and (2) sets very good predictions for forward and backward search */ |
and (2) sets very good predictions for forward and backward search */ |
1677 |
|
|
1768 |
Data->iMinSAD = OldData->iMinSAD + 1 + block; |
Data->iMinSAD = OldData->iMinSAD + 1 + block; |
1769 |
Data->currentMV = OldData->currentMV + 1 + block; |
Data->currentMV = OldData->currentMV + 1 + block; |
1770 |
Data->currentQMV = OldData->currentQMV + 1 + block; |
Data->currentQMV = OldData->currentQMV + 1 + block; |
|
Data->quarterpel = OldData->quarterpel; |
|
1771 |
|
|
|
if (block != 0) { |
|
1772 |
if(pParam->m_quarterpel) { |
if(pParam->m_quarterpel) { |
1773 |
*(Data->iMinSAD) += lambda_vec8[Data->iQuant] * |
//it is qpel. substract d_mv_bits[qpel] from 0, add d_mv_bits[hpel] everywhere |
1774 |
|
if (block == 0) |
1775 |
|
*(Data->iMinSAD) -= Data->lambda8 * |
1776 |
d_mv_bits( Data->currentQMV->x - Data->predQMV.x, |
d_mv_bits( Data->currentQMV->x - Data->predQMV.x, |
1777 |
Data->currentQMV->y - Data->predQMV.y, |
Data->currentQMV->y - Data->predQMV.y, |
1778 |
Data->iFcode); |
Data->iFcode); |
1779 |
} |
|
1780 |
else { |
*(Data->iMinSAD) += Data->lambda8 * |
|
*(Data->iMinSAD) += lambda_vec8[Data->iQuant] * |
|
1781 |
d_mv_bits( Data->currentMV->x - Data->predMV.x, |
d_mv_bits( Data->currentMV->x - Data->predMV.x, |
1782 |
Data->currentMV->y - Data->predMV.y, |
Data->currentMV->y - Data->predMV.y, |
1783 |
Data->iFcode); |
Data->iFcode); |
1784 |
} |
} else //it is not qpel. add d_mv_bits[hpel] everywhere but not in 0 (it's already there) |
1785 |
} |
if (block != 0) *(Data->iMinSAD) += Data->lambda8 * |
1786 |
|
d_mv_bits( Data->currentMV->x - Data->predMV.x, |
1787 |
|
Data->currentMV->y - Data->predMV.y, |
1788 |
|
Data->iFcode); |
1789 |
|
|
1790 |
|
|
1791 |
Data->Ref = OldData->Ref + 8 * ((block&1) + pParam->edged_width*(block>>1)); |
Data->Ref = OldData->Ref + 8 * ((block&1) + pParam->edged_width*(block>>1)); |
1792 |
Data->RefH = OldData->RefH + 8 * ((block&1) + pParam->edged_width*(block>>1)); |
Data->RefH = OldData->RefH + 8 * ((block&1) + pParam->edged_width*(block>>1)); |
1825 |
} |
} |
1826 |
} |
} |
1827 |
|
|
1828 |
if((Data->quarterpel) && (!(Data->currentQMV->x & 1)) && (!(Data->currentQMV->y & 1)) && |
if(pParam->m_quarterpel) { |
1829 |
|
if((!(Data->currentQMV->x & 1)) && (!(Data->currentQMV->y & 1)) && |
1830 |
(MotionFlags & PMV_QUARTERPELREFINE8)) { |
(MotionFlags & PMV_QUARTERPELREFINE8)) { |
1831 |
|
|
1832 |
CheckCandidate = CheckCandidate8_qpel; |
CheckCandidate = CheckCandidate8_qpel; |
1833 |
|
Data->iMinSAD[0] -= Data->lambda8 * |
1834 |
|
d_mv_bits(Data->predMV.x - Data->currentMV[0].x, Data->predMV.y - Data->currentMV[0].y, Data->iFcode); |
1835 |
|
|
1836 |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 8, |
Data->iMinSAD[0] += Data->lambda8 * |
1837 |
pParam->width, pParam->height, OldData->iFcode, 0); // get real range |
d_mv_bits(Data->predQMV.x - Data->currentQMV[0].x, Data->predQMV.y - Data->currentQMV[0].y, Data->iFcode); |
1838 |
|
|
1839 |
QuarterpelRefine(Data); |
QuarterpelRefine(Data); |
1840 |
} |
} |
|
|
|
|
if(pParam->m_quarterpel) { |
|
1841 |
pMB->pmvs[block].x = Data->currentQMV->x - Data->predQMV.x; |
pMB->pmvs[block].x = Data->currentQMV->x - Data->predQMV.x; |
1842 |
pMB->pmvs[block].y = Data->currentQMV->y - Data->predQMV.y; |
pMB->pmvs[block].y = Data->currentQMV->y - Data->predQMV.y; |
1843 |
} |
} else { |
|
else { |
|
1844 |
pMB->pmvs[block].x = Data->currentMV->x - Data->predMV.x; |
pMB->pmvs[block].x = Data->currentMV->x - Data->predMV.x; |
1845 |
pMB->pmvs[block].y = Data->currentMV->y - Data->predMV.y; |
pMB->pmvs[block].y = Data->currentMV->y - Data->predMV.y; |
1846 |
} |
} |
1857 |
const uint8_t * const pRefH, |
const uint8_t * const pRefH, |
1858 |
const uint8_t * const pRefV, |
const uint8_t * const pRefV, |
1859 |
const uint8_t * const pRefHV, |
const uint8_t * const pRefHV, |
|
const uint8_t * const pRefQ, |
|
1860 |
const IMAGE * const pCur, |
const IMAGE * const pCur, |
1861 |
const int x, |
const int x, |
1862 |
const int y, |
const int y, |
1874 |
int i, t; |
int i, t; |
1875 |
MainSearchFunc * MainSearchPtr; |
MainSearchFunc * MainSearchPtr; |
1876 |
|
|
|
Data->predQMV = get_qpmv2(pMBs, pParam->mb_width, 0, x, y, 0); |
|
1877 |
Data->predMV = get_pmv2(pMBs, pParam->mb_width, 0, x, y, 0); |
Data->predMV = get_pmv2(pMBs, pParam->mb_width, 0, x, y, 0); |
1878 |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
1879 |
pParam->width, pParam->height, Data->iFcode, pParam->m_quarterpel); |
pParam->width, pParam->height, Data->iFcode, pParam->m_quarterpel); |
1883 |
Data->RefH = pRefH + (x + iEdgedWidth*y) * 16; |
Data->RefH = pRefH + (x + iEdgedWidth*y) * 16; |
1884 |
Data->RefV = pRefV + (x + iEdgedWidth*y) * 16; |
Data->RefV = pRefV + (x + iEdgedWidth*y) * 16; |
1885 |
Data->RefHV = pRefHV + (x + iEdgedWidth*y) * 16; |
Data->RefHV = pRefHV + (x + iEdgedWidth*y) * 16; |
1886 |
Data->RefQ = pRefQ; |
Data->lambda16 = lambda_vec16[iQuant]; |
|
|
|
|
Data->iQuant = iQuant; |
|
1887 |
|
|
1888 |
if (!(MotionFlags & PMV_HALFPEL16)) { |
if (!(MotionFlags & PMV_HALFPEL16)) { |
1889 |
Data->min_dx = EVEN(Data->min_dx); |
Data->min_dx = EVEN(Data->min_dx); |
1934 |
} |
} |
1935 |
|
|
1936 |
if((pParam->m_quarterpel) && (MotionFlags & PMV_QUARTERPELREFINE16)) { |
if((pParam->m_quarterpel) && (MotionFlags & PMV_QUARTERPELREFINE16)) { |
1937 |
|
|
1938 |
if(inter4v) |
if(inter4v) |
1939 |
CheckCandidate = CheckCandidate16_qpel; |
CheckCandidate = CheckCandidate16_qpel; |
1940 |
else |
else |
1941 |
CheckCandidate = CheckCandidate16no4v_qpel; |
CheckCandidate = CheckCandidate16no4v_qpel; |
1942 |
|
|
|
get_range(&Data->min_dx, &Data->max_dx, &Data->min_dy, &Data->max_dy, x, y, 16, |
|
|
pParam->width, pParam->height, Data->iFcode, 0); // get real range |
|
|
|
|
1943 |
QuarterpelRefine(Data); |
QuarterpelRefine(Data); |
1944 |
} |
} |
1945 |
|
|
1946 |
|
|
1947 |
if (inter4v) { |
if (inter4v) { |
1948 |
SearchData Data8; |
SearchData Data8; |
1949 |
Data8.iFcode = Data->iFcode; |
Data8.iFcode = Data->iFcode; |
1950 |
Data8.iQuant = Data->iQuant; |
Data8.lambda8 = lambda_vec8[pMB->quant]; |
1951 |
Data8.iEdgedWidth = Data->iEdgedWidth; |
Data8.iEdgedWidth = Data->iEdgedWidth; |
1952 |
Search8hinted(Data, 2*x, 2*y, MotionFlags, pParam, pMB, pMBs, 0, &Data8); |
Search8hinted(Data, 2*x, 2*y, MotionFlags, pParam, pMB, pMBs, 0, &Data8); |
1953 |
Search8hinted(Data, 2*x + 1, 2*y, MotionFlags, pParam, pMB, pMBs, 1, &Data8); |
Search8hinted(Data, 2*x + 1, 2*y, MotionFlags, pParam, pMB, pMBs, 1, &Data8); |
1995 |
int32_t temp[5], quant = current->quant; |
int32_t temp[5], quant = current->quant; |
1996 |
int32_t iMinSAD[5]; |
int32_t iMinSAD[5]; |
1997 |
VECTOR currentMV[5]; |
VECTOR currentMV[5]; |
|
VECTOR currentQMV[5]; |
|
1998 |
SearchData Data; |
SearchData Data; |
1999 |
Data.iEdgedWidth = pParam->edged_width; |
Data.iEdgedWidth = pParam->edged_width; |
2000 |
Data.currentMV = currentMV; |
Data.currentMV = currentMV; |
|
Data.currentQMV = currentQMV; |
|
2001 |
Data.iMinSAD = iMinSAD; |
Data.iMinSAD = iMinSAD; |
2002 |
Data.temp = temp; |
Data.temp = temp; |
2003 |
Data.iFcode = current->fcode; |
Data.iFcode = current->fcode; |
2008 |
// somehow this is dirty since I think we shouldn't use malloc outside |
// somehow this is dirty since I think we shouldn't use malloc outside |
2009 |
// encoder_create() - so please fix me! |
// encoder_create() - so please fix me! |
2010 |
|
|
2011 |
|
Data.RefQ = qimage; |
2012 |
|
|
2013 |
if (sadInit) (*sadInit) (); |
if (sadInit) (*sadInit) (); |
2014 |
|
|
2015 |
for (y = 0; y < pParam->mb_height; y++) { |
for (y = 0; y < pParam->mb_height; y++) { |
2032 |
pMB->quant = quant; |
pMB->quant = quant; |
2033 |
} |
} |
2034 |
|
|
2035 |
SearchPhinted(pRef->y, pRefH->y, pRefV->y, pRefHV->y, qimage, pCurrent, x, |
SearchPhinted(pRef->y, pRefH->y, pRefV->y, pRefHV->y, pCurrent, x, |
2036 |
y, current->motion_flags, pMB->quant, |
y, current->motion_flags, pMB->quant, |
2037 |
pParam, pMBs, current->global_flags & XVID_INTER4V, pMB, |
pParam, pMBs, current->global_flags & XVID_INTER4V, pMB, |
2038 |
&Data); |
&Data); |
2078 |
if (!(mask = make_mask(pmv, 2))) |
if (!(mask = make_mask(pmv, 2))) |
2079 |
CheckCandidate16no4vI(0, 0, mask, &i, Data); |
CheckCandidate16no4vI(0, 0, mask, &i, Data); |
2080 |
|
|
2081 |
DiamondSearch(Data->currentMV->x, Data->currentMV->y, Data, i); |
DiamondSearch(Data->currentMV->x, Data->currentMV->y, Data, mask); |
2082 |
|
|
2083 |
pMB->mvs[0] = pMB->mvs[1] |
pMB->mvs[0] = pMB->mvs[1] |
2084 |
= pMB->mvs[2] = pMB->mvs[3] = *Data->currentMV; // all, for future get_pmv() |
= pMB->mvs[2] = pMB->mvs[3] = *Data->currentMV; // all, for future get_pmv() |
2106 |
Data.currentMV = ¤tMV; |
Data.currentMV = ¤tMV; |
2107 |
Data.iMinSAD = &iMinSAD; |
Data.iMinSAD = &iMinSAD; |
2108 |
Data.iFcode = iFcode; |
Data.iFcode = iFcode; |
|
Data.iQuant = 2; |
|
2109 |
|
|
2110 |
if (sadInit) (*sadInit) (); |
if (sadInit) (*sadInit) (); |
2111 |
|
|
2112 |
for (y = 0; y < pParam->mb_height-1; y++) { |
for (y = 1; y < pParam->mb_height-1; y++) { |
2113 |
for (x = 0; x < pParam->mb_width; x++) { |
for (x = 1; x < pParam->mb_width-1; x++) { |
2114 |
int sad, dev; |
int sad, dev; |
2115 |
MACROBLOCK *pMB = &pMBs[x + y * pParam->mb_width]; |
MACROBLOCK *pMB = &pMBs[x + y * pParam->mb_width]; |
2116 |
|
|
2117 |
sad = MEanalyzeMB(pRef->y, pCurrent->y, x, y, |
sad = MEanalyzeMB(pRef->y, pCurrent->y, x, y, |
2118 |
pParam, pMBs, pMB, &Data); |
pParam, pMBs, pMB, &Data); |
2119 |
|
|
|
if ( x != 0 && y != 0 && x != pParam->mb_width-1 ) { //no edge macroblocks, they just don't work |
|
2120 |
if (sad > INTRA_THRESH) { |
if (sad > INTRA_THRESH) { |
2121 |
dev = dev16(pCurrent->y + (x + y * pParam->edged_width) * 16, |
dev = dev16(pCurrent->y + (x + y * pParam->edged_width) * 16, |
2122 |
pParam->edged_width); |
pParam->edged_width); |
2125 |
} |
} |
2126 |
sSAD += sad; |
sSAD += sad; |
2127 |
} |
} |
|
|
|
|
} |
|
2128 |
} |
} |
2129 |
sSAD /= (pParam->mb_height-2)*(pParam->mb_width-2); |
sSAD /= (pParam->mb_height-2)*(pParam->mb_width-2); |
2130 |
if (sSAD > INTER_THRESH ) return 1; //P frame |
if (sSAD > INTER_THRESH ) return 1; //P frame |