--- trunk/xvidcore/src/utils/mbtransquant.c	2002/08/17 16:22:58	375
+++ trunk/xvidcore/src/utils/mbtransquant.c	2003/02/15 15:22:19	851
@@ -65,12 +65,11 @@
 #include "../quant/quant_h263.h"
 #include "../encoder.h"
 
-#define MIN(X, Y) ((X)<(Y)?(X):(Y))
-#define MAX(X, Y) ((X)>(Y)?(X):(Y))
+#include "../image/reduced.h"
 
-#define TOOSMALL_LIMIT 3 /* skip blocks having a coefficient sum below this value */
+MBFIELDTEST_PTR MBFieldTest;
 
-/* this isnt pretty, but its better than 20 ifdefs */
+#define TOOSMALL_LIMIT 1 /* skip blocks having a coefficient sum below this value */
 
 void
 MBTransQuantIntra(const MBParam * pParam,
@@ -84,28 +83,45 @@
 
 	uint32_t stride = pParam->edged_width;
 	uint32_t stride2 = stride / 2;
-	uint32_t next_block = stride * 8;
+	uint32_t next_block = stride * ((frame->global_flags & XVID_REDUCED)?16:8);
 	uint32_t i;
 	uint32_t iQuant = frame->quant;
 	uint8_t *pY_Cur, *pU_Cur, *pV_Cur;
 	IMAGE *pCurrent = &frame->image;
 
-	pY_Cur = pCurrent->y + (y_pos << 4) * stride + (x_pos << 4);
-	pU_Cur = pCurrent->u + (y_pos << 3) * stride2 + (x_pos << 3);
-	pV_Cur = pCurrent->v + (y_pos << 3) * stride2 + (x_pos << 3);
-
 	start_timer();
-	transfer_8to16copy(&data[0 * 64], pY_Cur, stride);
-	transfer_8to16copy(&data[1 * 64], pY_Cur + 8, stride);
-	transfer_8to16copy(&data[2 * 64], pY_Cur + next_block, stride);
-	transfer_8to16copy(&data[3 * 64], pY_Cur + next_block + 8, stride);
-	transfer_8to16copy(&data[4 * 64], pU_Cur, stride2);
-	transfer_8to16copy(&data[5 * 64], pV_Cur, stride2);
+	if ((frame->global_flags & XVID_REDUCED))
+	{
+		pY_Cur = pCurrent->y + (y_pos << 5) * stride + (x_pos << 5);
+		pU_Cur = pCurrent->u + (y_pos << 4) * stride2 + (x_pos << 4);
+		pV_Cur = pCurrent->v + (y_pos << 4) * stride2 + (x_pos << 4);
+
+		filter_18x18_to_8x8(&data[0 * 64], pY_Cur, stride);
+		filter_18x18_to_8x8(&data[1 * 64], pY_Cur + 16, stride);
+		filter_18x18_to_8x8(&data[2 * 64], pY_Cur + next_block, stride);
+		filter_18x18_to_8x8(&data[3 * 64], pY_Cur + next_block + 16, stride);
+		filter_18x18_to_8x8(&data[4 * 64], pU_Cur, stride2);
+		filter_18x18_to_8x8(&data[5 * 64], pV_Cur, stride2);
+	}else{
+		pY_Cur = pCurrent->y + (y_pos << 4) * stride + (x_pos << 4);
+		pU_Cur = pCurrent->u + (y_pos << 3) * stride2 + (x_pos << 3);
+		pV_Cur = pCurrent->v + (y_pos << 3) * stride2 + (x_pos << 3);
+
+		transfer_8to16copy(&data[0 * 64], pY_Cur, stride);
+		transfer_8to16copy(&data[1 * 64], pY_Cur + 8, stride);
+		transfer_8to16copy(&data[2 * 64], pY_Cur + next_block, stride);
+		transfer_8to16copy(&data[3 * 64], pY_Cur + next_block + 8, stride);
+		transfer_8to16copy(&data[4 * 64], pU_Cur, stride2);
+		transfer_8to16copy(&data[5 * 64], pV_Cur, stride2);
+	}
 	stop_transfer_timer();
 
+	/* XXX: rrv+interlacing is buggy */
 	start_timer();
 	pMB->field_dct = 0;
-	if ((frame->global_flags & XVID_INTERLACING)) {
+	if ((frame->global_flags & XVID_INTERLACING) &&
+		(x_pos>0) && (x_pos<pParam->mb_width-1) &&
+		(y_pos>0) && (y_pos<pParam->mb_height-1)) {
 		pMB->field_dct = MBDecideFieldDCT(data);
 	}
 	stop_interlacing_timer();
@@ -121,38 +137,60 @@
 			start_timer();
 			quant_intra(&qcoeff[i * 64], &data[i * 64], iQuant, iDcScaler);
 			stop_quant_timer();
-
-			start_timer();
-			dequant_intra(&data[i * 64], &qcoeff[i * 64], iQuant, iDcScaler);
-			stop_iquant_timer();
 		} else {
 			start_timer();
 			quant4_intra(&qcoeff[i * 64], &data[i * 64], iQuant, iDcScaler);
 			stop_quant_timer();
+		}
+
+		/* speedup: dont decode when encoding only ivops */
+		if (pParam->iMaxKeyInterval != 1 || pParam->max_bframes > 0)
+		{
+			if (pParam->m_quant_type == H263_QUANT) {
+				start_timer();
+				dequant_intra(&data[i * 64], &qcoeff[i * 64], iQuant, iDcScaler);
+				stop_iquant_timer();
+			} else {
+				start_timer();
+				dequant4_intra(&data[i * 64], &qcoeff[i * 64], iQuant, iDcScaler);
+				stop_iquant_timer();
+			}
 
 			start_timer();
-			dequant4_intra(&data[i * 64], &qcoeff[i * 64], iQuant, iDcScaler);
-			stop_iquant_timer();
+			idct(&data[i * 64]);
+			stop_idct_timer();
 		}
-
-		start_timer();
-		idct(&data[i * 64]);
-		stop_idct_timer();
 	}
 
-	if (pMB->field_dct) {
-		next_block = stride;
-		stride *= 2;
-	}
+	/* speedup: dont decode when encoding only ivops */
+	if (pParam->iMaxKeyInterval != 1 || pParam->max_bframes > 0)
+	{
+
+		if (pMB->field_dct) {
+			next_block = stride;
+			stride *= 2;
+		}
 
-	start_timer();
-	transfer_16to8copy(pY_Cur, &data[0 * 64], stride);
-	transfer_16to8copy(pY_Cur + 8, &data[1 * 64], stride);
-	transfer_16to8copy(pY_Cur + next_block, &data[2 * 64], stride);
-	transfer_16to8copy(pY_Cur + next_block + 8, &data[3 * 64], stride);
-	transfer_16to8copy(pU_Cur, &data[4 * 64], stride2);
-	transfer_16to8copy(pV_Cur, &data[5 * 64], stride2);
-	stop_transfer_timer();
+		start_timer();
+		if ((frame->global_flags & XVID_REDUCED))
+		{
+			copy_upsampled_8x8_16to8(pY_Cur, &data[0 * 64], stride);
+			copy_upsampled_8x8_16to8(pY_Cur + 16, &data[1 * 64], stride);
+			copy_upsampled_8x8_16to8(pY_Cur + next_block, &data[2 * 64], stride);
+			copy_upsampled_8x8_16to8(pY_Cur + next_block + 16, &data[3 * 64], stride);
+			copy_upsampled_8x8_16to8(pU_Cur, &data[4 * 64], stride2);
+			copy_upsampled_8x8_16to8(pV_Cur, &data[5 * 64], stride2);
+
+		}else{
+			transfer_16to8copy(pY_Cur, &data[0 * 64], stride);
+			transfer_16to8copy(pY_Cur + 8, &data[1 * 64], stride);
+			transfer_16to8copy(pY_Cur + next_block, &data[2 * 64], stride);
+			transfer_16to8copy(pY_Cur + next_block + 8, &data[3 * 64], stride);
+			transfer_16to8copy(pU_Cur, &data[4 * 64], stride2);
+			transfer_16to8copy(pV_Cur, &data[5 * 64], stride2);
+		}
+		stop_transfer_timer();
+	}
 }
 
 
@@ -169,7 +207,7 @@
 
 	uint32_t stride = pParam->edged_width;
 	uint32_t stride2 = stride / 2;
-	uint32_t next_block = stride * 8;
+	uint32_t next_block = stride * ((frame->global_flags & XVID_REDUCED)?16:8);
 	uint32_t i;
 	uint32_t iQuant = frame->quant;
 	uint8_t *pY_Cur, *pU_Cur, *pV_Cur;
@@ -177,18 +215,29 @@
 	uint32_t sum;
 	IMAGE *pCurrent = &frame->image;
 
-	pY_Cur = pCurrent->y + (y_pos << 4) * stride + (x_pos << 4);
-	pU_Cur = pCurrent->u + (y_pos << 3) * stride2 + (x_pos << 3);
-	pV_Cur = pCurrent->v + (y_pos << 3) * stride2 + (x_pos << 3);
+	if ((frame->global_flags & XVID_REDUCED))
+	{
+		pY_Cur = pCurrent->y + (y_pos << 5) * stride + (x_pos << 5);
+		pU_Cur = pCurrent->u + (y_pos << 4) * stride2 + (x_pos << 4);
+		pV_Cur = pCurrent->v + (y_pos << 4) * stride2 + (x_pos << 4);
+	}else{
+		pY_Cur = pCurrent->y + (y_pos << 4) * stride + (x_pos << 4);
+		pU_Cur = pCurrent->u + (y_pos << 3) * stride2 + (x_pos << 3);
+		pV_Cur = pCurrent->v + (y_pos << 3) * stride2 + (x_pos << 3);
+	}
 
 	start_timer();
 	pMB->field_dct = 0;
-	if ((frame->global_flags & XVID_INTERLACING)) {
+	if ((frame->global_flags & XVID_INTERLACING) &&
+		(x_pos>0) && (x_pos<pParam->mb_width-1) &&
+		(y_pos>0) && (y_pos<pParam->mb_height-1)) {
 		pMB->field_dct = MBDecideFieldDCT(data);
 	}
 	stop_interlacing_timer();
 
 	for (i = 0; i < 6; i++) {
+		uint32_t increase_limit = (iQuant == 1) ? 1 : 0;
+
 		/*
 		 * no need to transfer 8->16-bit
 		 * (this is performed already in motion compensation)
 		 */
@@ -207,7 +256,7 @@
 			stop_quant_timer();
 		}
 
-		if ((sum >= TOOSMALL_LIMIT) || (qcoeff[i*64] != 0) ||
+		if ((sum >= TOOSMALL_LIMIT + increase_limit) || (qcoeff[i*64] != 0) ||
 			(qcoeff[i*64+1] != 0) || (qcoeff[i*64+8] != 0)) {
 
 			if (pParam->m_quant_type == H263_QUANT) {
@@ -234,18 +283,34 @@
 	}
 
 	start_timer();
-	if (cbp & 32)
-		transfer_16to8add(pY_Cur, &data[0 * 64], stride);
-	if (cbp & 16)
-		transfer_16to8add(pY_Cur + 8, &data[1 * 64], stride);
-	if (cbp & 8)
-		transfer_16to8add(pY_Cur + next_block, &data[2 * 64], stride);
-	if (cbp & 4)
-		transfer_16to8add(pY_Cur + next_block + 8, &data[3 * 64], stride);
-	if (cbp & 2)
-		transfer_16to8add(pU_Cur, &data[4 * 64], stride2);
-	if (cbp & 1)
-		transfer_16to8add(pV_Cur, &data[5 * 64], stride2);
+	if ((frame->global_flags & XVID_REDUCED))
+	{
+		if (cbp & 32)
+			add_upsampled_8x8_16to8(pY_Cur, &data[0 * 64], stride);
+		if (cbp & 16)
+			add_upsampled_8x8_16to8(pY_Cur + 16, &data[1 * 64], stride);
+		if (cbp & 8)
+			add_upsampled_8x8_16to8(pY_Cur + next_block, &data[2 * 64], stride);
+		if (cbp & 4)
+			add_upsampled_8x8_16to8(pY_Cur + 16 + next_block, &data[3 * 64], stride);
+		if (cbp & 2)
+			add_upsampled_8x8_16to8(pU_Cur, &data[4 * 64], stride2);
+		if (cbp & 1)
+			add_upsampled_8x8_16to8(pV_Cur, &data[5 * 64], stride2);
+	}else{
+		if (cbp & 32)
+			transfer_16to8add(pY_Cur, &data[0 * 64], stride);
+		if (cbp & 16)
+			transfer_16to8add(pY_Cur + 8, &data[1 * 64], stride);
+		if (cbp & 8)
+			transfer_16to8add(pY_Cur + next_block, &data[2 * 64], stride);
+		if (cbp & 4)
+			transfer_16to8add(pY_Cur + next_block + 8, &data[3 * 64], stride);
+		if (cbp & 2)
+			transfer_16to8add(pU_Cur, &data[4 * 64], stride2);
+		if (cbp & 1)
+			transfer_16to8add(pV_Cur, &data[5 * 64], stride2);
+	}
 	stop_transfer_timer();
 
 	return cbp;
@@ -296,8 +361,6 @@
 MBTransQuantInterBVOP(const MBParam * pParam,
					  FRAMEINFO * frame,
					  MACROBLOCK * pMB,
-					  const uint32_t x_pos,
-					  const uint32_t y_pos,
					  int16_t data[6 * 64],
					  int16_t qcoeff[6 * 64])
 {
@@ -574,13 +637,27 @@
 
 
-/* if sum(diff between field lines) < sum(diff between frame lines), use field dct */
+/* permute block and return field dct choice */
 
 uint32_t
 MBDecideFieldDCT(int16_t data[6 * 64])
 {
+	uint32_t field = MBFieldTest(data);
+	if (field) {
+		MBFrameToField(data);
+	}
+
+	return field;
+}
+
+
+/* if sum(diff between field lines) < sum(diff between frame lines), use field dct */
+
+uint32_t
+MBFieldTest_c(int16_t data[6 * 64])
+{
 	const uint8_t blocks[] =
		{ 0 * 64, 0 * 64, 0 * 64, 0 * 64, 2 * 64, 2 * 64, 2 * 64, 2 * 64 };
 	const uint8_t lines[] = { 0, 16, 32, 48, 0, 16, 32, 48 };
@@ -614,11 +691,7 @@
 		}
 	}
 
-	if (frame > field) {
-		MBFrameToField(data);
-	}
-
-	return (frame > field);
+	return (frame >= (field + 350));
 }
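
Side note on the MBDecideFieldDCT/MBFieldTest split in the last two hunks: the heuristic itself is unchanged in spirit (compare the vertical activity of the macroblock lines grouped as frame lines against the same lines grouped as field lines), but the permutation now happens only in MBDecideFieldDCT, and the comparison gained a +350 bias so field DCT is picked only when it wins clearly. The sketch below illustrates that decision on a plain 16x16 luma block; it is a simplified stand-in, not the loop from mbtransquant.c (which walks the four 8x8 coefficient blocks through the blocks[]/lines[] tables), and the name field_dct_preferred is made up for the example.

#include <stdint.h>
#include <stdlib.h>

/* Illustrative only: frame-vs-field DCT test on a 16x16 luma block.
 * "frame" accumulates differences between adjacent picture lines,
 * "field" between adjacent lines of the same field (two lines apart).
 * The +350 margin mirrors the new return statement in MBFieldTest_c. */
static int
field_dct_preferred(const int16_t mb[16][16])
{
	int frame = 0, field = 0;
	int x, y;

	for (y = 0; y < 15; y++)
		for (x = 0; x < 16; x++)
			frame += abs(mb[y + 1][x] - mb[y][x]);

	for (y = 0; y < 14; y++)
		for (x = 0; x < 16; x++)
			field += abs(mb[y + 2][x] - mb[y][x]);

	return (frame >= (field + 350));
}

When the test returns nonzero, the block lines are permuted with MBFrameToField() and the caller records the choice in pMB->field_dct, which is exactly what the new MBDecideFieldDCT wrapper does.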