--- branches/dev-api-4/xvidcore/src/utils/mbtransquant.c 2003/03/30 13:16:42 965 +++ branches/dev-api-4/xvidcore/src/utils/mbtransquant.c 2003/09/10 00:54:27 1139 @@ -21,10 +21,12 @@ * along with this program ; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * - * $Id: mbtransquant.c,v 1.21.2.6 2003-03-30 13:16:41 edgomez Exp $ + * $Id: mbtransquant.c,v 1.21.2.16 2003-09-10 00:54:27 edgomez Exp $ * ****************************************************************************/ +#include +#include #include #include "../portab.h" @@ -33,6 +35,8 @@ #include "../global.h" #include "mem_transfer.h" #include "timer.h" +#include "../bitstream/mbcoding.h" +#include "../bitstream/zigzag.h" #include "../dct/fdct.h" #include "../dct/idct.h" #include "../quant/quant_mpeg4.h" @@ -68,13 +72,13 @@ /* Performs Forward DCT on all blocks */ static __inline void -MBfDCT(const MBParam * pParam, - FRAMEINFO * frame, - MACROBLOCK * pMB, +MBfDCT(const MBParam * const pParam, + const FRAMEINFO * const frame, + MACROBLOCK * const pMB, uint32_t x_pos, uint32_t y_pos, int16_t data[6 * 64]) -{ +{ /* Handles interlacing */ start_timer(); pMB->field_dct = 0; @@ -114,23 +118,33 @@ /* Quantize all blocks -- Intra mode */ static __inline void MBQuantIntra(const MBParam * pParam, + const FRAMEINFO * const frame, const MACROBLOCK * pMB, - int16_t qcoeff[6 * 64], + int16_t qcoeff[6 * 64], int16_t data[6*64]) { - int i; + int mpeg; + int scaler_lum, scaler_chr; - for (i = 0; i < 6; i++) { - uint32_t iDcScaler = get_dc_scaler(pMB->quant, i < 4); + quanth263_intraFuncPtr const quant[2] = + { + (quanth263_intraFuncPtr)quant_intra, + (quanth263_intraFuncPtr)quant4_intra + }; + + mpeg = !!(pParam->vol_flags & XVID_VOL_MPEGQUANT); + scaler_lum = get_dc_scaler(pMB->quant, 1); + scaler_chr = get_dc_scaler(pMB->quant, 0); - /* Quantize the block */ - start_timer(); - if (!(pParam->vol_flags & XVID_VOL_MPEGQUANT)) - quant_intra(&data[i * 64], &qcoeff[i * 64], pMB->quant, iDcScaler); - else - quant4_intra(&data[i * 64], &qcoeff[i * 64], pMB->quant, iDcScaler); - stop_quant_timer(); - } + /* Quantize the block */ + start_timer(); + quant[mpeg](&data[0 * 64], &qcoeff[0 * 64], pMB->quant, scaler_lum); + quant[mpeg](&data[1 * 64], &qcoeff[1 * 64], pMB->quant, scaler_lum); + quant[mpeg](&data[2 * 64], &qcoeff[2 * 64], pMB->quant, scaler_lum); + quant[mpeg](&data[3 * 64], &qcoeff[3 * 64], pMB->quant, scaler_lum); + quant[mpeg](&data[4 * 64], &qcoeff[4 * 64], pMB->quant, scaler_chr); + quant[mpeg](&data[5 * 64], &qcoeff[5 * 64], pMB->quant, scaler_chr); + stop_quant_timer(); } /* DeQuantize all blocks -- Intra mode */ @@ -140,23 +154,54 @@ int16_t qcoeff[6 * 64], int16_t data[6*64]) { - int i; + int mpeg; + int scaler_lum, scaler_chr; - for (i = 0; i < 6; i++) { - uint32_t iDcScaler = get_dc_scaler(iQuant, i < 4); + quanth263_intraFuncPtr const dequant[2] = + { + (quanth263_intraFuncPtr)dequant_intra, + (quanth263_intraFuncPtr)dequant4_intra + }; + + mpeg = !!(pParam->vol_flags & XVID_VOL_MPEGQUANT); + scaler_lum = get_dc_scaler(iQuant, 1); + scaler_chr = get_dc_scaler(iQuant, 0); - start_timer(); - if (!(pParam->vol_flags & XVID_VOL_MPEGQUANT)) - dequant_intra(&qcoeff[i * 64], &data[i * 64], iQuant, iDcScaler); - else - dequant4_intra(&qcoeff[i * 64], &data[i * 64], iQuant, iDcScaler); - stop_iquant_timer(); - } + start_timer(); + dequant[mpeg](&qcoeff[0 * 64], &data[0 * 64], iQuant, scaler_lum); + dequant[mpeg](&qcoeff[1 * 64], &data[1 * 64], iQuant, scaler_lum); + 
dequant[mpeg](&qcoeff[2 * 64], &data[2 * 64], iQuant, scaler_lum); + dequant[mpeg](&qcoeff[3 * 64], &data[3 * 64], iQuant, scaler_lum); + dequant[mpeg](&qcoeff[4 * 64], &data[4 * 64], iQuant, scaler_chr); + dequant[mpeg](&qcoeff[5 * 64], &data[5 * 64], iQuant, scaler_chr); + stop_iquant_timer(); } + +typedef int (*trellis_func_ptr_t)(int16_t *const Out, + const int16_t *const In, + int Q, + const uint16_t * const Zigzag, + int Non_Zero); + +static int +dct_quantize_trellis_h263_c(int16_t *const Out, + const int16_t *const In, + int Q, + const uint16_t * const Zigzag, + int Non_Zero); + +static int +dct_quantize_trellis_mpeg_c(int16_t *const Out, + const int16_t *const In, + int Q, + const uint16_t * const Zigzag, + int Non_Zero); + /* Quantize all blocks -- Inter mode */ static __inline uint8_t MBQuantInter(const MBParam * pParam, + const FRAMEINFO * const frame, const MACROBLOCK * pMB, int16_t data[6 * 64], int16_t qcoeff[6 * 64], @@ -167,16 +212,32 @@ int i; uint8_t cbp = 0; int sum; - int code_block; + int code_block, mpeg; + + quanth263_interFuncPtr const quant[2] = + { + (quanth263_interFuncPtr)quant_inter, + (quanth263_interFuncPtr)quant4_inter + }; + + trellis_func_ptr_t const trellis[2] = + { + (trellis_func_ptr_t)dct_quantize_trellis_h263_c, + (trellis_func_ptr_t)dct_quantize_trellis_mpeg_c + }; + + mpeg = !!(pParam->vol_flags & XVID_VOL_MPEGQUANT); for (i = 0; i < 6; i++) { - + /* Quantize the block */ start_timer(); - if (!(pParam->vol_flags & XVID_VOL_MPEGQUANT)) - sum = quant_inter(&qcoeff[i * 64], &data[i * 64], pMB->quant); - else - sum = quant4_inter(&qcoeff[i * 64], &data[i * 64], pMB->quant); + + sum = quant[mpeg](&qcoeff[i*64], &data[i*64], pMB->quant); + + if(sum && (frame->vop_flags & XVID_VOP_TRELLISQUANT)) { + sum = trellis[mpeg](&qcoeff[i*64], &data[i*64], pMB->quant, &scan_tables[0][0], 63); + } stop_quant_timer(); /* @@ -201,32 +262,37 @@ /* Set the corresponding cbp bit */ cbp |= code_block << (5 - i); - } return(cbp); } /* DeQuantize all blocks -- Inter mode */ -static __inline void +static __inline void MBDeQuantInter(const MBParam * pParam, const int iQuant, int16_t data[6 * 64], int16_t qcoeff[6 * 64], const uint8_t cbp) { - int i; + int mpeg; - for (i = 0; i < 6; i++) { - if (cbp & (1 << (5 - i))) { - start_timer(); - if (!(pParam->vol_flags & XVID_VOL_MPEGQUANT)) - dequant_inter(&data[i * 64], &qcoeff[i * 64], iQuant); - else - dequant4_inter(&data[i * 64], &qcoeff[i * 64], iQuant); - stop_iquant_timer(); - } - } + quanth263_interFuncPtr const dequant[2] = + { + (quanth263_interFuncPtr)dequant_inter, + (quanth263_interFuncPtr)dequant4_inter + }; + + mpeg = !!(pParam->vol_flags & XVID_VOL_MPEGQUANT); + + start_timer(); + if(cbp & (1 << (5 - 0))) dequant[mpeg](&data[0 * 64], &qcoeff[0 * 64], iQuant); + if(cbp & (1 << (5 - 1))) dequant[mpeg](&data[1 * 64], &qcoeff[1 * 64], iQuant); + if(cbp & (1 << (5 - 2))) dequant[mpeg](&data[2 * 64], &qcoeff[2 * 64], iQuant); + if(cbp & (1 << (5 - 3))) dequant[mpeg](&data[3 * 64], &qcoeff[3 * 64], iQuant); + if(cbp & (1 << (5 - 4))) dequant[mpeg](&data[4 * 64], &qcoeff[4 * 64], iQuant); + if(cbp & (1 << (5 - 5))) dequant[mpeg](&data[5 * 64], &qcoeff[5 * 64], iQuant); + stop_iquant_timer(); } typedef void (transfer_operation_8to16_t) (int16_t *Dst, const uint8_t *Src, int BpS); @@ -234,9 +300,9 @@ static __inline void -MBTrans8to16(const MBParam * pParam, - FRAMEINFO * frame, - MACROBLOCK * pMB, +MBTrans8to16(const MBParam * const pParam, + const FRAMEINFO * const frame, + const MACROBLOCK * const pMB, const uint32_t x_pos, 
const uint32_t y_pos, int16_t data[6 * 64]) @@ -244,36 +310,29 @@ uint32_t stride = pParam->edged_width; uint32_t stride2 = stride / 2; uint32_t next_block = stride * 8; - int32_t cst; + int32_t cst; + int vop_reduced; uint8_t *pY_Cur, *pU_Cur, *pV_Cur; - IMAGE *pCurrent = &frame->image; + const IMAGE * const pCurrent = &frame->image; + transfer_operation_8to16_t * const functions[2] = + { + (transfer_operation_8to16_t *)transfer_8to16copy, + (transfer_operation_8to16_t *)filter_18x18_to_8x8 + }; transfer_operation_8to16_t *transfer_op = NULL; - if ((frame->vop_flags & XVID_VOP_REDUCED)) { + vop_reduced = !!(frame->vop_flags & XVID_VOP_REDUCED); - /* Image pointers */ - pY_Cur = pCurrent->y + (y_pos << 5) * stride + (x_pos << 5); - pU_Cur = pCurrent->u + (y_pos << 4) * stride2 + (x_pos << 4); - pV_Cur = pCurrent->v + (y_pos << 4) * stride2 + (x_pos << 4); - - /* Block size */ - cst = 16; - - /* Operation function */ - transfer_op = (transfer_operation_8to16_t*)filter_18x18_to_8x8; - } else { - - /* Image pointers */ - pY_Cur = pCurrent->y + (y_pos << 4) * stride + (x_pos << 4); - pU_Cur = pCurrent->u + (y_pos << 3) * stride2 + (x_pos << 3); - pV_Cur = pCurrent->v + (y_pos << 3) * stride2 + (x_pos << 3); + /* Image pointers */ + pY_Cur = pCurrent->y + (y_pos << (4+vop_reduced)) * stride + (x_pos << (4+vop_reduced)); + pU_Cur = pCurrent->u + (y_pos << (3+vop_reduced)) * stride2 + (x_pos << (3+vop_reduced)); + pV_Cur = pCurrent->v + (y_pos << (3+vop_reduced)) * stride2 + (x_pos << (3+vop_reduced)); - /* Block size */ - cst = 8; + /* Block size */ + cst = 8<edged_width; uint32_t stride2 = stride / 2; uint32_t next_block = stride * 8; - uint32_t cst; - IMAGE *pCurrent = &frame->image; + uint32_t cst; + int vop_reduced; + const IMAGE * const pCurrent = &frame->image; + /* Array of function pointers, indexed by [vop_reduced<<1+add] */ + transfer_operation_16to8_t * const functions[4] = + { + (transfer_operation_16to8_t*)transfer_16to8copy, + (transfer_operation_16to8_t*)transfer_16to8add, + (transfer_operation_16to8_t*)copy_upsampled_8x8_16to8, + (transfer_operation_16to8_t*)add_upsampled_8x8_16to8 + }; + transfer_operation_16to8_t *transfer_op = NULL; if (pMB->field_dct) { @@ -309,46 +378,28 @@ stride *= 2; } - if ((frame->vop_flags & XVID_VOP_REDUCED)) { + /* Makes this vars booleans */ + vop_reduced = !!(frame->vop_flags & XVID_VOP_REDUCED); - /* Image pointers */ - pY_Cur = pCurrent->y + (y_pos << 5) * stride + (x_pos << 5); - pU_Cur = pCurrent->u + (y_pos << 4) * stride2 + (x_pos << 4); - pV_Cur = pCurrent->v + (y_pos << 4) * stride2 + (x_pos << 4); - - /* Block size */ - cst = 16; - - /* Operation function */ - if(add) - transfer_op = (transfer_operation_16to8_t*)add_upsampled_8x8_16to8; - else - transfer_op = (transfer_operation_16to8_t*)copy_upsampled_8x8_16to8; - } else { + /* Image pointers */ + pY_Cur = pCurrent->y + (y_pos << (4+vop_reduced)) * stride + (x_pos << (4+vop_reduced)); + pU_Cur = pCurrent->u + (y_pos << (3+vop_reduced)) * stride2 + (x_pos << (3+vop_reduced)); + pV_Cur = pCurrent->v + (y_pos << (3+vop_reduced)) * stride2 + (x_pos << (3+vop_reduced)); - /* Image pointers */ - pY_Cur = pCurrent->y + (y_pos << 4) * stride + (x_pos << 4); - pU_Cur = pCurrent->u + (y_pos << 3) * stride2 + (x_pos << 3); - pV_Cur = pCurrent->v + (y_pos << 3) * stride2 + (x_pos << 3); - - /* Block size */ - cst = 8; - - /* Operation function */ - if(add) - transfer_op = (transfer_operation_16to8_t*)transfer_16to8add; - else - transfer_op = (transfer_operation_16to8_t*)transfer_16to8copy; - } + /* 
Block size */ + cst = 8<quant, data, qcoeff); @@ -387,9 +438,9 @@ uint8_t -MBTransQuantInter(const MBParam * pParam, - FRAMEINFO * frame, - MACROBLOCK * pMB, +MBTransQuantInter(const MBParam * const pParam, + const FRAMEINFO * const frame, + MACROBLOCK * const pMB, const uint32_t x_pos, const uint32_t y_pos, int16_t data[6 * 64], @@ -409,8 +460,11 @@ /* Set the limit threshold */ limit = PVOP_TOOSMALL_LIMIT + ((pMB->quant == 1)? 1 : 0); + if (frame->vop_flags & XVID_VOP_CARTOON) + limit *= 3; + /* Quantize the block */ - cbp = MBQuantInter(pParam, pMB, data, qcoeff, 0, limit); + cbp = MBQuantInter(pParam, frame, pMB, data, qcoeff, 0, limit); /* DeQuantize the block */ MBDeQuantInter(pParam, pMB->quant, data, qcoeff, cbp); @@ -420,22 +474,22 @@ /* Transfer back the data -- Add the data */ MBTrans16to8(pParam, frame, pMB, x_pos, y_pos, data, 1, cbp); - + return(cbp); } uint8_t MBTransQuantInterBVOP(const MBParam * pParam, - FRAMEINFO * frame, - MACROBLOCK * pMB, - const uint32_t x_pos, - const uint32_t y_pos, - int16_t data[6 * 64], - int16_t qcoeff[6 * 64]) + FRAMEINFO * frame, + MACROBLOCK * pMB, + const uint32_t x_pos, + const uint32_t y_pos, + int16_t data[6 * 64], + int16_t qcoeff[6 * 64]) { uint8_t cbp; uint32_t limit; - + /* * There is no MBTrans8to16 for Inter block, that's done in motion compensation * already @@ -447,8 +501,11 @@ /* Set the limit threshold */ limit = BVOP_TOOSMALL_LIMIT; + if (frame->vop_flags & XVID_VOP_CARTOON) + limit *= 2; + /* Quantize the block */ - cbp = MBQuantInter(pParam, pMB, data, qcoeff, 1, limit); + cbp = MBQuantInter(pParam, frame, pMB, data, qcoeff, 1, limit); /* * History comment: @@ -486,25 +543,25 @@ for (i = 0; i < 7; ++i) { for (j = 0; j < 8; ++j) { frame += - ABS(data[0 * 64 + (i + 1) * 8 + j] - data[0 * 64 + i * 8 + j]); + abs(data[0 * 64 + (i + 1) * 8 + j] - data[0 * 64 + i * 8 + j]); frame += - ABS(data[1 * 64 + (i + 1) * 8 + j] - data[1 * 64 + i * 8 + j]); + abs(data[1 * 64 + (i + 1) * 8 + j] - data[1 * 64 + i * 8 + j]); frame += - ABS(data[2 * 64 + (i + 1) * 8 + j] - data[2 * 64 + i * 8 + j]); + abs(data[2 * 64 + (i + 1) * 8 + j] - data[2 * 64 + i * 8 + j]); frame += - ABS(data[3 * 64 + (i + 1) * 8 + j] - data[3 * 64 + i * 8 + j]); + abs(data[3 * 64 + (i + 1) * 8 + j] - data[3 * 64 + i * 8 + j]); field += - ABS(data[blocks[i + 1] + lines[i + 1] + j] - + abs(data[blocks[i + 1] + lines[i + 1] + j] - data[blocks[i] + lines[i] + j]); field += - ABS(data[blocks[i + 1] + lines[i + 1] + 8 + j] - + abs(data[blocks[i + 1] + lines[i + 1] + 8 + j] - data[blocks[i] + lines[i] + 8 + j]); field += - ABS(data[blocks[i + 1] + 64 + lines[i + 1] + j] - + abs(data[blocks[i + 1] + 64 + lines[i + 1] + j] - data[blocks[i] + 64 + lines[i] + j]); field += - ABS(data[blocks[i + 1] + 64 + lines[i + 1] + 8 + j] - + abs(data[blocks[i + 1] + 64 + lines[i + 1] + 8 + j] - data[blocks[i] + 64 + lines[i] + 8 + j]); } } @@ -516,7 +573,7 @@ /* deinterlace Y blocks vertically */ #define MOVLINE(X,Y) memcpy(X, Y, sizeof(tmp)) -#define LINE(X,Y) &data[X*64 + Y*8] +#define LINE(X,Y) &data[X*64 + Y*8] void MBFrameToField(int16_t data[6 * 64]) @@ -525,26 +582,26 @@ /* left blocks */ - // 1=2, 2=4, 4=8, 8=1 + /* 1=2, 2=4, 4=8, 8=1 */ MOVLINE(tmp, LINE(0, 1)); MOVLINE(LINE(0, 1), LINE(0, 2)); MOVLINE(LINE(0, 2), LINE(0, 4)); MOVLINE(LINE(0, 4), LINE(2, 0)); MOVLINE(LINE(2, 0), tmp); - // 3=6, 6=12, 12=9, 9=3 + /* 3=6, 6=12, 12=9, 9=3 */ MOVLINE(tmp, LINE(0, 3)); MOVLINE(LINE(0, 3), LINE(0, 6)); MOVLINE(LINE(0, 6), LINE(2, 4)); MOVLINE(LINE(2, 4), LINE(2, 1)); MOVLINE(LINE(2, 1), 
tmp); - // 5=10, 10=5 + /* 5=10, 10=5 */ MOVLINE(tmp, LINE(0, 5)); MOVLINE(LINE(0, 5), LINE(2, 2)); MOVLINE(LINE(2, 2), tmp); - // 7=14, 14=13, 13=11, 11=7 + /* 7=14, 14=13, 13=11, 11=7 */ MOVLINE(tmp, LINE(0, 7)); MOVLINE(LINE(0, 7), LINE(2, 6)); MOVLINE(LINE(2, 6), LINE(2, 5)); @@ -553,29 +610,725 @@ /* right blocks */ - // 1=2, 2=4, 4=8, 8=1 + /* 1=2, 2=4, 4=8, 8=1 */ MOVLINE(tmp, LINE(1, 1)); MOVLINE(LINE(1, 1), LINE(1, 2)); MOVLINE(LINE(1, 2), LINE(1, 4)); MOVLINE(LINE(1, 4), LINE(3, 0)); MOVLINE(LINE(3, 0), tmp); - // 3=6, 6=12, 12=9, 9=3 + /* 3=6, 6=12, 12=9, 9=3 */ MOVLINE(tmp, LINE(1, 3)); MOVLINE(LINE(1, 3), LINE(1, 6)); MOVLINE(LINE(1, 6), LINE(3, 4)); MOVLINE(LINE(3, 4), LINE(3, 1)); MOVLINE(LINE(3, 1), tmp); - // 5=10, 10=5 + /* 5=10, 10=5 */ MOVLINE(tmp, LINE(1, 5)); MOVLINE(LINE(1, 5), LINE(3, 2)); MOVLINE(LINE(3, 2), tmp); - // 7=14, 14=13, 13=11, 11=7 + /* 7=14, 14=13, 13=11, 11=7 */ MOVLINE(tmp, LINE(1, 7)); MOVLINE(LINE(1, 7), LINE(3, 6)); MOVLINE(LINE(3, 6), LINE(3, 5)); MOVLINE(LINE(3, 5), LINE(3, 3)); MOVLINE(LINE(3, 3), tmp); } + + + + + +/***************************************************************************** + * Trellis based R-D optimal quantization + * + * Trellis Quant code (C) 2003 Pascal Massimino skal(at)planet-d.net + * + ****************************************************************************/ + + +#if 0 +static int +dct_quantize_trellis_mpeg_c(int16_t *const Out, + const int16_t *const In, + int Q, + const uint16_t * const Zigzag, + int Non_Zero) +{ + return 63; +} +#endif + +/*---------------------------------------------------------------------------- + * + * Trellis-Based quantization + * + * As far as I understand this paper: + * + * "Trellis-Based R-D Optimal Quantization in H.263+" + * J.Wen, M.Luttrell, J.Villasenor + * IEEE Transactions on Image Processing, Vol.9, No.8, Aug. 2000. + * + * what we have here is a simplified Bellman-Ford / Dijkstra single-source + * shortest-path algo. But due to the underlying graph structure + * ("Trellis"), it can be turned into a dynamic programming algo, + * partially avoiding an explicit representation of the graph's nodes. And + * there is no need for a heap, since the open frontier of the DAG is always + * known, and of fixed size. + *--------------------------------------------------------------------------*/ + + + +/* Code lengths for relevant levels. 
*/ + +/* let's factorize: */ +static const uint8_t Code_Len0[64] = { + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len1[64] = { + 20,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len2[64] = { + 19,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len3[64] = { + 18,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len4[64] = { + 17,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len5[64] = { + 16,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len6[64] = { + 15,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len7[64] = { + 13,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len8[64] = { + 11,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len9[64] = { + 12,21,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len10[64] = { + 12,20,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len11[64] = { + 12,19,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len12[64] = { + 11,17,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len13[64] = { + 11,15,21,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len14[64] = { + 10,12,19,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 
30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len15[64] = { + 10,13,17,19,21,21,21,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len16[64] = { + 9,12,13,18,18,19,19,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30}; +static const uint8_t Code_Len17[64] = { + 8,11,13,14,14,14,15,19,19,19,21,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len18[64] = { + 7, 9,11,11,13,13,13,15,15,15,16,22,22,22,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len19[64] = { + 5, 7, 9,10,10,11,11,11,11,11,13,14,16,17,17,18,18,18,18,18,18,18,18,20,20,21,21,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30 }; +static const uint8_t Code_Len20[64] = { + 3, 4, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9,10,10,10,10,10,10,10,10,12,12,13,13,12,13,14,15,15, + 15,16,16,16,16,17,17,17,18,18,19,19,19,19,19,19,19,19,21,21,22,22,30,30,30,30,30,30,30,30,30,30 }; + +/* a few more table for LAST table: */ +static const uint8_t Code_Len21[64] = { + 13,20,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30}; +static const uint8_t Code_Len22[64] = { + 12,15,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30, + 30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30}; +static const uint8_t Code_Len23[64] = { + 10,12,15,15,15,16,16,16,16,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,19,19,19,19,20,20,20, + 20,21,21,21,21,21,21,21,21,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30}; +static const uint8_t Code_Len24[64] = { + 5, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,10,10,10,10,10,10,10,10,11,11,11,11,12,12,12, + 12,13,13,13,13,13,13,13,13,14,16,16,16,16,17,17,17,17,18,18,18,18,18,18,18,18,19,19,19,19,19,19}; + + +static const uint8_t * const B16_17_Code_Len[24] = { /* levels [1..24] */ + Code_Len20,Code_Len19,Code_Len18,Code_Len17, + Code_Len16,Code_Len15,Code_Len14,Code_Len13, + Code_Len12,Code_Len11,Code_Len10,Code_Len9, + Code_Len8, Code_Len7 ,Code_Len6 ,Code_Len5, + Code_Len4, Code_Len3, Code_Len3 ,Code_Len2, + Code_Len2, Code_Len1, Code_Len1, Code_Len1, +}; + +static const uint8_t * const B16_17_Code_Len_Last[6] = { /* levels [1..6] */ + Code_Len24,Code_Len23,Code_Len22,Code_Len21, Code_Len3, Code_Len1, +}; + +#define TL(q) 0xfe00/(q*q) + +static const int Trellis_Lambda_Tabs[31] = { + TL( 1),TL( 2),TL( 3),TL( 4),TL( 5),TL( 6), TL( 7), + TL( 8),TL( 9),TL(10),TL(11),TL(12),TL(13),TL(14), TL(15), + TL(16),TL(17),TL(18),TL(19),TL(20),TL(21),TL(22), TL(23), + TL(24),TL(25),TL(26),TL(27),TL(28),TL(29),TL(30), TL(31) +}; +#undef TL + +static int __inline +Find_Last(const int16_t *C, const uint16_t *Zigzag, int i) +{ + while(i>=0) + if (C[Zigzag[i]]) + return i; + else i--; + return -1; +} + +static int 
__inline +Compute_Sum(const int16_t *C, int last) +{ + int sum = 0; + + while(last--) + sum += abs(C[last]); + + return(sum); +} +/* this routine has been strippen of all debug code */ + +static int +dct_quantize_trellis_h263_c(int16_t *const Out, const int16_t *const In, int Q, const uint16_t * const Zigzag, int Non_Zero) +{ + + /* + * Note: We should search last non-zero coeffs on *real* DCT input coeffs (In[]), + * not quantized one (Out[]). However, it only improves the result *very* + * slightly (~0.01dB), whereas speed drops to crawling level :) + * Well, actually, taking 1 more coeff past Non_Zero into account sometimes helps. + */ + typedef struct { int16_t Run, Level; } NODE; + + NODE Nodes[65], Last; + uint32_t Run_Costs0[64+1]; + uint32_t * const Run_Costs = Run_Costs0 + 1; + const int Mult = 2*Q; + const int Bias = (Q-1) | 1; + const int Lev0 = Mult + Bias; + const int Lambda = Trellis_Lambda_Tabs[Q-1]; /* it's 1/lambda, actually */ + + int Run_Start = -1; + uint32_t Min_Cost = 2<<16; + + int Last_Node = -1; + uint32_t Last_Cost = 0; + + int i, j, sum; + Run_Costs[-1] = 2<<16; /* source (w/ CBP penalty) */ + + Non_Zero = Find_Last(Out, Zigzag, Non_Zero); + if (Non_Zero<0) + return 0; /* Sum is zero if there are only zero coeffs */ + + for(i=0; i<=Non_Zero; i++) { + const int AC = In[Zigzag[i]]; + const int Level1 = Out[Zigzag[i]]; + const int Dist0 = Lambda* AC*AC; + uint32_t Best_Cost = 0xf0000000; + Last_Cost += Dist0; + + /* very specialized loop for -1,0,+1 */ + if ((uint32_t)(Level1+1)<3) { + int dQ; + int Run; + uint32_t Cost0; + + if (AC<0) { + Nodes[i].Level = -1; + dQ = Lev0 + AC; + } else { + Nodes[i].Level = 1; + dQ = Lev0 - AC; + } + Cost0 = Lambda*dQ*dQ; + + Nodes[i].Run = 1; + Best_Cost = (Code_Len20[0]<<16) + Run_Costs[i-1]+Cost0; + for(Run=i-Run_Start; Run>0; --Run) { + const uint32_t Cost_Base = Cost0 + Run_Costs[i-Run]; + const uint32_t Cost = Cost_Base + (Code_Len20[Run-1]<<16); + const uint32_t lCost = Cost_Base + (Code_Len24[Run-1]<<16); + + /* + * TODO: what about tie-breaks? Should we favor short runs or + * long runs? Although the error is the same, it would not be + * spread the same way along high and low frequencies... + */ + + /* (I'd say: favour short runs => hifreq errors (HVS) -- gruel ) */ + + if (Cost1) { + dQ1 = Level1*Mult-AC + Bias; + dQ2 = dQ1 - Mult; + Level2 = Level1-1; + Tbl_L1 = (Level1<=24) ? B16_17_Code_Len[Level1-1] : Code_Len0; + Tbl_L2 = (Level2<=24) ? B16_17_Code_Len[Level2-1] : Code_Len0; + Tbl_L1_Last = (Level1<=6) ? B16_17_Code_Len_Last[Level1-1] : Code_Len0; + Tbl_L2_Last = (Level2<=6) ? B16_17_Code_Len_Last[Level2-1] : Code_Len0; + } else { /* Level1<-1 */ + dQ1 = Level1*Mult-AC - Bias; + dQ2 = dQ1 + Mult; + Level2 = Level1 + 1; + Tbl_L1 = (Level1>=-24) ? B16_17_Code_Len[Level1^-1] : Code_Len0; + Tbl_L2 = (Level2>=-24) ? B16_17_Code_Len[Level2^-1] : Code_Len0; + Tbl_L1_Last = (Level1>=- 6) ? B16_17_Code_Len_Last[Level1^-1] : Code_Len0; + Tbl_L2_Last = (Level2>=- 6) ? B16_17_Code_Len_Last[Level2^-1] : Code_Len0; + } + + Dist1 = Lambda*dQ1*dQ1; + Dist2 = Lambda*dQ2*dQ2; + dDist21 = Dist2-Dist1; + + for(Run=i-Run_Start; Run>0; --Run) + { + const uint32_t Cost_Base = Dist1 + Run_Costs[i-Run]; + uint32_t Cost1, Cost2; + int bLevel; + + /* + * for sub-optimal (but slightly worth it, speed-wise) search, uncomment the following: + * if (Cost_Base>=Best_Cost) continue; + * (? 
doesn't seem to have any effect -- gruel ) + */ + + Cost1 = Cost_Base + (Tbl_L1[Run-1]<<16); + Cost2 = Cost_Base + (Tbl_L2[Run-1]<<16) + dDist21; + + if (Cost2Min_Cost+(1<<16) ) + Run_Start++; + + /* spread on preceding coeffs the cost incurred by skipping this one */ + for(j=Run_Start; j=0) { + Out[Zigzag[i]] = Nodes[i].Level; + sum += abs(Nodes[i].Level); + i -= Nodes[i].Run; + } + + return sum; +} + +static int +dct_quantize_trellis_mpeg_c(int16_t *const Out, const int16_t *const In, int Q, const uint16_t * const Zigzag, int Non_Zero) +{ + /* ToDo: Ok ok it's just a place holder for Gruel -- damn write this one :-) */ + return Compute_Sum(Out, 63); +} + +/* original version including heavy debugging info */ + +#ifdef DBGTRELL + +#define DBG 0 + +static __inline uint32_t Evaluate_Cost(const int16_t *C, int Mult, int Bias, + const uint16_t * Zigzag, int Max, int Lambda) +{ +#if (DBG>0) + const int16_t * const Ref = C + 6*64; + int Last = Max; + int Bits = 0; + int Dist = 0; + int i; + uint32_t Cost; + + while(Last>=0 && C[Zigzag[Last]]==0) + Last--; + + if (Last>=0) { + int j=0, j0=0; + int Run, Level; + + Bits = 2; /* CBP */ + while(j=-24 && Level<=24) + Bits += B16_17_Code_Len[(Level<0) ? -Level-1 : Level-1][Run]; + else + Bits += 30; + } + Level = C[Zigzag[Last]]; + Run = j - j0; + if (Level>=-6 && Level<=6) + Bits += B16_17_Code_Len_Last[(Level<0) ? -Level-1 : Level-1][Run]; + else + Bits += 30; + } + + for(i=0; i<=Last; ++i) { + int V = C[Zigzag[i]]*Mult; + if (V>0) + V += Bias; + else + if (V<0) + V -= Bias; + V -= Ref[Zigzag[i]]; + Dist += V*V; + } + Cost = Lambda*Dist + (Bits<<16); + if (DBG==1) + printf( " Last:%2d/%2d Cost = [(Bits=%5.0d) + Lambda*(Dist=%6.0d) = %d ] >>12= %d ", Last,Max, Bits, Dist, Cost, Cost>>12 ); + return Cost; + +#else + return 0; +#endif +} + + +static int +dct_quantize_trellis_h263_c(int16_t *const Out, const int16_t *const In, int Q, const uint16_t * const Zigzag, int Non_Zero) +{ + + /* + * Note: We should search last non-zero coeffs on *real* DCT input coeffs (In[]), + * not quantized one (Out[]). However, it only improves the result *very* + * slightly (~0.01dB), whereas speed drops to crawling level :) + * Well, actually, taking 1 more coeff past Non_Zero into account sometimes helps. 
+ */ + typedef struct { int16_t Run, Level; } NODE; + + NODE Nodes[65], Last; + uint32_t Run_Costs0[64+1]; + uint32_t * const Run_Costs = Run_Costs0 + 1; + const int Mult = 2*Q; + const int Bias = (Q-1) | 1; + const int Lev0 = Mult + Bias; + const int Lambda = Trellis_Lambda_Tabs[Q-1]; /* it's 1/lambda, actually */ + + int Run_Start = -1; + Run_Costs[-1] = 2<<16; /* source (w/ CBP penalty) */ + uint32_t Min_Cost = 2<<16; + + int Last_Node = -1; + uint32_t Last_Cost = 0; + + int i, j; + +#if (DBG>0) + Last.Level = 0; Last.Run = -1; /* just initialize to smthg */ +#endif + + Non_Zero = Find_Last(Out, Zigzag, Non_Zero); + if (Non_Zero<0) + return -1; + + for(i=0; i<=Non_Zero; i++) + { + const int AC = In[Zigzag[i]]; + const int Level1 = Out[Zigzag[i]]; + const int Dist0 = Lambda* AC*AC; + uint32_t Best_Cost = 0xf0000000; + Last_Cost += Dist0; + + if ((uint32_t)(Level1+1)<3) /* very specialized loop for -1,0,+1 */ + { + int dQ; + int Run; + uint32_t Cost0; + + if (AC<0) { + Nodes[i].Level = -1; + dQ = Lev0 + AC; + } else { + Nodes[i].Level = 1; + dQ = Lev0 - AC; + } + Cost0 = Lambda*dQ*dQ; + + Nodes[i].Run = 1; + Best_Cost = (Code_Len20[0]<<16) + Run_Costs[i-1]+Cost0; + for(Run=i-Run_Start; Run>0; --Run) + { + const uint32_t Cost_Base = Cost0 + Run_Costs[i-Run]; + const uint32_t Cost = Cost_Base + (Code_Len20[Run-1]<<16); + const uint32_t lCost = Cost_Base + (Code_Len24[Run-1]<<16); + + /* + * TODO: what about tie-breaks? Should we favor short runs or + * long runs? Although the error is the same, it would not be + * spread the same way along high and low frequencies... + */ + if (Cost>12 ); + else if (j>Run_Start && j>12 ); + else if (j==i) printf( "(%3.0d)", Run_Costs[j]>>12 ); + else printf( " - |" ); + } + printf( "<%3.0d %2d %d>", Min_Cost>>12, Nodes[i].Level, Nodes[i].Run ); + printf( " Last:#%2d {%3.0d %2d %d}", Last_Node, Last_Cost>>12, Last.Level, Last.Run ); + printf( " AC:%3.0d Dist0:%3d Dist(%d)=%d", AC, Dist0>>12, Nodes[i].Level, Cost0>>12 ); + printf( "\n" ); + } + } + else /* "big" levels */ + { + const uint8_t *Tbl_L1, *Tbl_L2, *Tbl_L1_Last, *Tbl_L2_Last; + int Level2; + int dQ1, dQ2; + int Run; + uint32_t Dist1,Dist2; + int dDist21; + + if (Level1>1) { + dQ1 = Level1*Mult-AC + Bias; + dQ2 = dQ1 - Mult; + Level2 = Level1-1; + Tbl_L1 = (Level1<=24) ? B16_17_Code_Len[Level1-1] : Code_Len0; + Tbl_L2 = (Level2<=24) ? B16_17_Code_Len[Level2-1] : Code_Len0; + Tbl_L1_Last = (Level1<=6) ? B16_17_Code_Len_Last[Level1-1] : Code_Len0; + Tbl_L2_Last = (Level2<=6) ? B16_17_Code_Len_Last[Level2-1] : Code_Len0; + } else { /* Level1<-1 */ + dQ1 = Level1*Mult-AC - Bias; + dQ2 = dQ1 + Mult; + Level2 = Level1 + 1; + Tbl_L1 = (Level1>=-24) ? B16_17_Code_Len[Level1^-1] : Code_Len0; + Tbl_L2 = (Level2>=-24) ? B16_17_Code_Len[Level2^-1] : Code_Len0; + Tbl_L1_Last = (Level1>=- 6) ? B16_17_Code_Len_Last[Level1^-1] : Code_Len0; + Tbl_L2_Last = (Level2>=- 6) ? 
B16_17_Code_Len_Last[Level2^-1] : Code_Len0; + } + Dist1 = Lambda*dQ1*dQ1; + Dist2 = Lambda*dQ2*dQ2; + dDist21 = Dist2-Dist1; + + for(Run=i-Run_Start; Run>0; --Run) + { + const uint32_t Cost_Base = Dist1 + Run_Costs[i-Run]; + uint32_t Cost1, Cost2; + int bLevel; + +/* + * for sub-optimal (but slightly worth it, speed-wise) search, uncomment the following: + * if (Cost_Base>=Best_Cost) continue; + */ + Cost1 = Cost_Base + (Tbl_L1[Run-1]<<16); + Cost2 = Cost_Base + (Tbl_L2[Run-1]<<16) + dDist21; + + if (Cost2>12 ); + else if (j>Run_Start && j>12 ); + else if (j==i) printf( "(%3.0d)", Run_Costs[j]>>12 ); + else printf( " - |" ); + } + printf( "<%3.0d %2d %d>", Min_Cost>>12, Nodes[i].Level, Nodes[i].Run ); + printf( " Last:#%2d {%3.0d %2d %d}", Last_Node, Last_Cost>>12, Last.Level, Last.Run ); + printf( " AC:%3.0d Dist0:%3d Dist(%2d):%3d Dist(%2d):%3d", AC, Dist0>>12, Level1, Dist1>>12, Level2, Dist2>>12 ); + printf( "\n" ); + } + } + + Run_Costs[i] = Best_Cost; + + if (Best_Cost < Min_Cost + Dist0) { + Min_Cost = Best_Cost; + Run_Start = i; + } + else + { + /* + * as noticed by Michael Niedermayer (michaelni at gmx.at), there's + * a code shorter by 1 bit for a larger run (!), same level. We give + * it a chance by not moving the left barrier too much. + */ + + while( Run_Costs[Run_Start]>Min_Cost+(1<<16) ) + Run_Start++; + + /* spread on preceding coeffs the cost incurred by skipping this one */ + for(j=Run_Start; j " ); + for(i=0; i<=Non_Zero; ++i) printf( "[%3.0d] ", Out[Zigzag[i]] ); + printf( "\n" ); + } + } + + if (Last_Node<0) + return -1; + + /* reconstruct optimal sequence backward with surviving paths */ + memset(Out, 0x00, 64*sizeof(*Out)); + Out[Zigzag[Last_Node]] = Last.Level; + i = Last_Node - Last.Run; + while(i>=0) { + Out[Zigzag[i]] = Nodes[i].Level; + i -= Nodes[i].Run; + } + + if (DBG) { + uint32_t Cost = Evaluate_Cost(Out,Mult,Bias, Zigzag,Non_Zero, Lambda); + if (DBG==1) { + printf( "<= " ); + for(i=0; i<=Last_Node; ++i) printf( "[%3.0d] ", Out[Zigzag[i]] ); + printf( "\n--------------------------------\n" ); + } + if (Cost>Last_Cost) printf( "!!! %u > %u\n", Cost, Last_Cost ); + } + return Last_Node; +} + +#undef DBG + +#endif
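
For readers new to the rate-distortion trade-off the trellis quantizer above optimizes, here is a minimal standalone sketch (not part of the patch). It reuses the constants visible in dct_quantize_trellis_h263_c() -- Mult = 2*Q, Bias = (Q-1)|1, Lambda = 0xfe00/(Q*Q), Cost = Lambda*dQ*dQ + (bits<<16) -- for a single positive AC coefficient, but the two VLC lengths bits1/bits2 are invented placeholders standing in for the B16_17 code-length tables, and the main() demo is purely illustrative, not an XviD function.

#include <stdio.h>

/* Toy demo: choose between the regular quantized level and the next level
 * toward zero for one AC coefficient, using the trellis cost model. */
int main(void)
{
	const int Q      = 4;                /* quantizer step */
	const int AC     = 35;               /* example DCT coefficient (positive) */
	const int Mult   = 2 * Q;
	const int Bias   = (Q - 1) | 1;
	const int Lambda = 0xfe00 / (Q * Q); /* distortion weight, cf. TL(q) */

	/* level produced by the regular H.263 inter quantizer (roughly |AC|/2Q) */
	const int Level1 = AC / Mult;
	const int Level2 = Level1 - 1;       /* candidate one step toward zero */

	/* reconstruction errors: dequantization yields Level*Mult + Bias */
	const int dQ1 = Level1 * Mult + Bias - AC;
	const int dQ2 = dQ1 - Mult;

	/* placeholder VLC lengths for the two (run,level) events */
	const int bits1 = 20;
	const int bits2 = 7;

	const unsigned Cost1 = (unsigned)(Lambda * dQ1 * dQ1) + ((unsigned)bits1 << 16);
	const unsigned Cost2 = (unsigned)(Lambda * dQ2 * dQ2) + ((unsigned)bits2 << 16);

	printf("level %d: cost %u   level %d: cost %u   -> keep level %d\n",
	       Level1, Cost1, Level2, Cost2, (Cost2 < Cost1) ? Level2 : Level1);
	return 0;
}

The real routine extends this per-coefficient choice into the single-source shortest-path search described in the comment block, carrying Run_Costs[] as the fixed-size frontier over (run, level) events and reconstructing the surviving path backward at the end.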