--- trunk/xvidcore/src/utils/mbtransquant.c	2003/02/15 08:39:17	850
+++ trunk/xvidcore/src/utils/mbtransquant.c	2003/02/15 15:22:19	851
@@ -1,61 +1,55 @@
-/*****************************************************************************
- *
- *  XVID MPEG-4 VIDEO CODEC
- *  - MacroBlock transfer and quantization -
- *
- *  Copyright(C) 2002-2001 Christoph Lampert
- *               2002-2001 Michael Militzer
- *               2002-2001 Peter Ross
- *               2002      Daniel Smith
- *
- *  This file is part of XviD, a free MPEG-4 video encoder/decoder
- *
- *  XviD is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *  Under section 8 of the GNU General Public License, the copyright
- *  holders of XVID explicitly forbid distribution in the following
- *  countries:
- *
- *    - Japan
- *    - United States of America
- *
- *  Linking XviD statically or dynamically with other modules is making a
- *  combined work based on XviD. Thus, the terms and conditions of the
- *  GNU General Public License cover the whole combination.
- *
- *  As a special exception, the copyright holders of XviD give you
- *  permission to link XviD with independent modules that communicate with
- *  XviD solely through the VFW1.1 and DShow interfaces, regardless of the
- *  license terms of these independent modules, and to copy and distribute
- *  the resulting combined work under terms of your choice, provided that
- *  every copy of the combined work is accompanied by a complete copy of
- *  the source code of XviD (the version of XviD used to produce the
- *  combined work), being distributed under the terms of the GNU General
- *  Public License plus this exception. An independent module is a module
- *  which is not derived from or based on XviD.
- *
- *  Note that people who make modified versions of XviD are not obligated
- *  to grant this special exception for their modified versions; it is
- *  their choice whether to do so. The GNU General Public License gives
- *  permission to release a modified version without this exception; this
- *  exception also makes it possible to release a modified version which
- *  carries forward this exception.
- *
- * $Id: mbtransquant.c,v 1.20 2002-11-26 23:44:11 edgomez Exp $
- *
- ****************************************************************************/
+ /******************************************************************************
+  *                                                                            *
+  *  This file is part of XviD, a free MPEG-4 video encoder/decoder            *
+  *                                                                            *
+  *  XviD is an implementation of a part of one or more MPEG-4 Video tools     *
+  *  as specified in ISO/IEC 14496-2 standard. Those intending to use this     *
+  *  software module in hardware or software products are advised that its     *
+  *  use may infringe existing patents or copyrights, and any such use         *
+  *  would be at such party's own risk. The original developer of this         *
+  *  software module and his/her company, and subsequent editors and their     *
+  *  companies, will have no liability for use of this software or             *
+  *  modifications or derivatives thereof.                                     *
+  *                                                                            *
+  *  XviD is free software; you can redistribute it and/or modify it           *
+  *  under the terms of the GNU General Public License as published by         *
+  *  the Free Software Foundation; either version 2 of the License, or         *
+  *  (at your option) any later version.                                       *
+  *                                                                            *
+  *  XviD is distributed in the hope that it will be useful, but               *
+  *  WITHOUT ANY WARRANTY; without even the implied warranty of                *
+  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the              *
+  *  GNU General Public License for more details.                              *
+  *                                                                            *
+  *  You should have received a copy of the GNU General Public License         *
+  *  along with this program; if not, write to the Free Software               *
+  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA   *
+  *                                                                            *
+  ******************************************************************************/
+
+ /******************************************************************************
+  *                                                                            *
+  *  mbtransquant.c                                                            *
+  *                                                                            *
+  *  Copyright (C) 2001 - Peter Ross                                           *
+  *  Copyright (C) 2001 - Michael Militzer                                     *
+  *                                                                            *
+  *  For more information visit the XviD homepage: http://www.xvid.org         *
+  *                                                                            *
+  ******************************************************************************/
+
+ /******************************************************************************
+  *                                                                            *
+  *  Revision history:                                                         *
+  *                                                                            *
+  *  29.03.2002 interlacing speedup - used transfer strides instead of         *
+  *             manual field-to-frame conversion                               *
+  *  26.03.2002 interlacing support - moved transfers outside loops            *
+  *  22.12.2001 get_dc_scaler() moved to common.h                              *
+  *  19.11.2001 introduced coefficient thresholding (Isibaar)                  *
+  *  17.11.2001 initial version                                                *
+  *                                                                            *
+  ******************************************************************************/
 
 #include
 
@@ -71,12 +65,11 @@
 #include "../quant/quant_h263.h"
 #include "../encoder.h"
 
-#define MIN(X, Y) ((X)<(Y)?(X):(Y))
-#define MAX(X, Y) ((X)>(Y)?(X):(Y))
+#include "../image/reduced.h"
 
-#define TOOSMALL_LIMIT 3	/* skip blocks having a coefficient sum below this value */
+MBFIELDTEST_PTR MBFieldTest;
 
-/* this isnt pretty, but its better than 20 ifdefs */
+#define TOOSMALL_LIMIT 1	/* skip blocks having a coefficient sum below this value */
 
 void
 MBTransQuantIntra(const MBParam * pParam,
@@ -90,25 +83,40 @@
 	uint32_t stride = pParam->edged_width;
 	uint32_t stride2 = stride / 2;
-	uint32_t next_block = stride * 8;
+	uint32_t next_block = stride * ((frame->global_flags & XVID_REDUCED)?16:8);
 	uint32_t i;
 	uint32_t iQuant = frame->quant;
 	uint8_t *pY_Cur, *pU_Cur, *pV_Cur;
 	IMAGE *pCurrent = &frame->image;
 
-	pY_Cur = pCurrent->y + (y_pos << 4) * stride + (x_pos << 4);
-	pU_Cur = pCurrent->u + (y_pos << 3) * stride2 + (x_pos << 3);
-	pV_Cur = pCurrent->v + (y_pos << 3) * stride2 + (x_pos << 3);
-
 	start_timer();
-	transfer_8to16copy(&data[0 * 64], pY_Cur, stride);
-	transfer_8to16copy(&data[1 * 64], pY_Cur + 8, stride);
-	transfer_8to16copy(&data[2 * 64], pY_Cur + next_block, stride);
-	transfer_8to16copy(&data[3 * 64], pY_Cur + next_block + 8, stride);
-	transfer_8to16copy(&data[4 * 64], pU_Cur, stride2);
-	transfer_8to16copy(&data[5 * 64], pV_Cur, stride2);
+	if ((frame->global_flags & XVID_REDUCED))
+	{
+		pY_Cur = pCurrent->y + (y_pos << 5) * stride + (x_pos << 5);
+		pU_Cur = pCurrent->u + (y_pos << 4) * stride2 + (x_pos << 4);
+		pV_Cur = pCurrent->v + (y_pos << 4) * stride2 + (x_pos << 4);
+
+		filter_18x18_to_8x8(&data[0 * 64], pY_Cur, stride);
+		filter_18x18_to_8x8(&data[1 * 64], pY_Cur + 16, stride);
+		filter_18x18_to_8x8(&data[2 * 64], pY_Cur + next_block, stride);
+		filter_18x18_to_8x8(&data[3 * 64], pY_Cur + next_block + 16, stride);
+		filter_18x18_to_8x8(&data[4 * 64], pU_Cur, stride2);
+		filter_18x18_to_8x8(&data[5 * 64], pV_Cur, stride2);
+	}else{
+		pY_Cur = pCurrent->y + (y_pos << 4) * stride + (x_pos << 4);
+		pU_Cur = pCurrent->u + (y_pos << 3) * stride2 + (x_pos << 3);
+		pV_Cur = pCurrent->v + (y_pos << 3) * stride2 + (x_pos << 3);
+
+		transfer_8to16copy(&data[0 * 64], pY_Cur, stride);
+		transfer_8to16copy(&data[1 * 64], pY_Cur + 8, stride);
+		transfer_8to16copy(&data[2 * 64], pY_Cur + next_block, stride);
+		transfer_8to16copy(&data[3 * 64], pY_Cur + next_block + 8, stride);
+		transfer_8to16copy(&data[4 * 64], pU_Cur, stride2);
+		transfer_8to16copy(&data[5 * 64], pV_Cur, stride2);
+	}
 	stop_transfer_timer();
 
+	/* XXX: rrv+interlacing is buggy */
 	start_timer();
 	pMB->field_dct = 0;
 	if ((frame->global_flags & XVID_INTERLACING) &&
@@ -129,38 +137,60 @@
 			start_timer();
 			quant_intra(&qcoeff[i * 64], &data[i * 64], iQuant, iDcScaler);
 			stop_quant_timer();
-
-			start_timer();
-			dequant_intra(&data[i * 64], &qcoeff[i * 64], iQuant, iDcScaler);
-			stop_iquant_timer();
 		} else {
 			start_timer();
			quant4_intra(&qcoeff[i * 64], &data[i * 64], iQuant, iDcScaler);
 			stop_quant_timer();
+		}
+
+		/* speedup: dont decode when encoding only ivops */
+		if (pParam->iMaxKeyInterval != 1 || pParam->max_bframes > 0)
+		{
+			if (pParam->m_quant_type == H263_QUANT) {
+				start_timer();
+				dequant_intra(&data[i * 64], &qcoeff[i * 64], iQuant, iDcScaler);
+				stop_iquant_timer();
+			} else {
+				start_timer();
+				dequant4_intra(&data[i * 64], &qcoeff[i * 64], iQuant, iDcScaler);
+				stop_iquant_timer();
+			}
 
 			start_timer();
-			dequant4_intra(&data[i * 64], &qcoeff[i * 64], iQuant, iDcScaler);
-			stop_iquant_timer();
+			idct(&data[i * 64]);
+			stop_idct_timer();
 		}
-
-		start_timer();
-		idct(&data[i * 64]);
-		stop_idct_timer();
 	}
 
-	if (pMB->field_dct) {
-		next_block = stride;
-		stride *= 2;
-	}
+	/* speedup: dont decode when encoding only ivops */
+	if (pParam->iMaxKeyInterval != 1 || pParam->max_bframes > 0)
+	{
+
+		if (pMB->field_dct) {
+			next_block = stride;
+			stride *= 2;
+		}
 
-	start_timer();
-	transfer_16to8copy(pY_Cur, &data[0 * 64], stride);
-	transfer_16to8copy(pY_Cur + 8, &data[1 * 64], stride);
-	transfer_16to8copy(pY_Cur + next_block, &data[2 * 64], stride);
-	transfer_16to8copy(pY_Cur + next_block + 8, &data[3 * 64], stride);
-	transfer_16to8copy(pU_Cur, &data[4 * 64], stride2);
-	transfer_16to8copy(pV_Cur, &data[5 * 64], stride2);
-	stop_transfer_timer();
+		start_timer();
+		if ((frame->global_flags & XVID_REDUCED))
+		{
+			copy_upsampled_8x8_16to8(pY_Cur, &data[0 * 64], stride);
+			copy_upsampled_8x8_16to8(pY_Cur + 16, &data[1 * 64], stride);
+			copy_upsampled_8x8_16to8(pY_Cur + next_block, &data[2 * 64], stride);
+			copy_upsampled_8x8_16to8(pY_Cur + next_block + 16, &data[3 * 64], stride);
+			copy_upsampled_8x8_16to8(pU_Cur, &data[4 * 64], stride2);
+			copy_upsampled_8x8_16to8(pV_Cur, &data[5 * 64], stride2);
+
+		}else{
+			transfer_16to8copy(pY_Cur, &data[0 * 64], stride);
+			transfer_16to8copy(pY_Cur + 8, &data[1 * 64], stride);
+			transfer_16to8copy(pY_Cur + next_block, &data[2 * 64], stride);
+			transfer_16to8copy(pY_Cur + next_block + 8, &data[3 * 64], stride);
+			transfer_16to8copy(pU_Cur, &data[4 * 64], stride2);
+			transfer_16to8copy(pV_Cur, &data[5 * 64], stride2);
+		}
+		stop_transfer_timer();
+	}
 }
@@ -177,7 +207,7 @@
 	uint32_t stride = pParam->edged_width;
 	uint32_t stride2 = stride / 2;
-	uint32_t next_block = stride * 8;
+	uint32_t next_block = stride * ((frame->global_flags & XVID_REDUCED)?16:8);
 	uint32_t i;
 	uint32_t iQuant = frame->quant;
 	uint8_t *pY_Cur, *pU_Cur, *pV_Cur;
@@ -185,9 +215,16 @@
 	uint32_t sum;
 	IMAGE *pCurrent = &frame->image;
 
-	pY_Cur = pCurrent->y + (y_pos << 4) * stride + (x_pos << 4);
-	pU_Cur = pCurrent->u + (y_pos << 3) * stride2 + (x_pos << 3);
-	pV_Cur = pCurrent->v + (y_pos << 3) * stride2 + (x_pos << 3);
+	if ((frame->global_flags & XVID_REDUCED))
+	{
+		pY_Cur = pCurrent->y + (y_pos << 5) * stride + (x_pos << 5);
+		pU_Cur = pCurrent->u + (y_pos << 4) * stride2 + (x_pos << 4);
+		pV_Cur = pCurrent->v + (y_pos << 4) * stride2 + (x_pos << 4);
+	}else{
+		pY_Cur = pCurrent->y + (y_pos << 4) * stride + (x_pos << 4);
+		pU_Cur = pCurrent->u + (y_pos << 3) * stride2 + (x_pos << 3);
+		pV_Cur = pCurrent->v + (y_pos << 3) * stride2 + (x_pos << 3);
+	}
 
 	start_timer();
 	pMB->field_dct = 0;
@@ -199,6 +236,8 @@
 	stop_interlacing_timer();
 
 	for (i = 0; i < 6; i++) {
+		uint32_t increase_limit = (iQuant == 1) ? 1 : 0;
+
 		/*
 		 *	no need to transfer 8->16-bit
 		 * (this is performed already in motion compensation)
@@ -217,7 +256,7 @@
 			stop_quant_timer();
 		}
 
-		if ((sum >= TOOSMALL_LIMIT) || (qcoeff[i*64] != 0) ||
+		if ((sum >= TOOSMALL_LIMIT + increase_limit) || (qcoeff[i*64] != 0) ||
 			(qcoeff[i*64+1] != 0) || (qcoeff[i*64+8] != 0)) {
 
 			if (pParam->m_quant_type == H263_QUANT) {
@@ -244,18 +283,34 @@
 	}
 
 	start_timer();
-	if (cbp & 32)
-		transfer_16to8add(pY_Cur, &data[0 * 64], stride);
-	if (cbp & 16)
-		transfer_16to8add(pY_Cur + 8, &data[1 * 64], stride);
-	if (cbp & 8)
-		transfer_16to8add(pY_Cur + next_block, &data[2 * 64], stride);
-	if (cbp & 4)
-		transfer_16to8add(pY_Cur + next_block + 8, &data[3 * 64], stride);
-	if (cbp & 2)
-		transfer_16to8add(pU_Cur, &data[4 * 64], stride2);
-	if (cbp & 1)
-		transfer_16to8add(pV_Cur, &data[5 * 64], stride2);
+	if ((frame->global_flags & XVID_REDUCED))
+	{
+		if (cbp & 32)
+			add_upsampled_8x8_16to8(pY_Cur, &data[0 * 64], stride);
+		if (cbp & 16)
+			add_upsampled_8x8_16to8(pY_Cur + 16, &data[1 * 64], stride);
+		if (cbp & 8)
+			add_upsampled_8x8_16to8(pY_Cur + next_block, &data[2 * 64], stride);
+		if (cbp & 4)
+			add_upsampled_8x8_16to8(pY_Cur + 16 + next_block, &data[3 * 64], stride);
+		if (cbp & 2)
+			add_upsampled_8x8_16to8(pU_Cur, &data[4 * 64], stride2);
+		if (cbp & 1)
+			add_upsampled_8x8_16to8(pV_Cur, &data[5 * 64], stride2);
+	}else{
+		if (cbp & 32)
+			transfer_16to8add(pY_Cur, &data[0 * 64], stride);
+		if (cbp & 16)
+			transfer_16to8add(pY_Cur + 8, &data[1 * 64], stride);
+		if (cbp & 8)
+			transfer_16to8add(pY_Cur + next_block, &data[2 * 64], stride);
+		if (cbp & 4)
+			transfer_16to8add(pY_Cur + next_block + 8, &data[3 * 64], stride);
+		if (cbp & 2)
+			transfer_16to8add(pU_Cur, &data[4 * 64], stride2);
+		if (cbp & 1)
+			transfer_16to8add(pV_Cur, &data[5 * 64], stride2);
+	}
 	stop_transfer_timer();
 
 	return cbp;
@@ -388,8 +443,8 @@
 MBQuantIntra(const MBParam * pParam,
 			 FRAMEINFO * frame,
 			 MACROBLOCK *pMB,
-			 int16_t data[6 * 64],
-			 int16_t qcoeff[6 * 64])
+			 int16_t qcoeff[6 * 64],
+			 int16_t data[6*64])
 {
 	int i;
 	int iQuant = frame->quant;
@@ -462,7 +517,7 @@
 			stop_quant_timer();
 		}
 
-		if (sum >= TOOSMALL_LIMIT) {	/* skip block ? */
+		if (sum >= TOOSMALL_LIMIT) {	// skip block ?
 			cbp |= 1 << (5 - i);
 		}
 	}
@@ -582,13 +637,27 @@
 
 
-/* if sum(diff between field lines) < sum(diff between frame lines), use field dct */
+/* permute block and return field dct choice */
 uint32_t
 MBDecideFieldDCT(int16_t data[6 * 64])
 {
+	uint32_t field = MBFieldTest(data);
+
+	if (field) {
+		MBFrameToField(data);
+	}
+
+	return field;
+}
+
+/* if sum(diff between field lines) < sum(diff between frame lines), use field dct */
+
+uint32_t
+MBFieldTest_c(int16_t data[6 * 64])
+{
 	const uint8_t blocks[] =
 		{ 0 * 64, 0 * 64, 0 * 64, 0 * 64, 2 * 64, 2 * 64, 2 * 64, 2 * 64 };
 	const uint8_t lines[] = { 0, 16, 32, 48, 0, 16, 32, 48 };
@@ -622,11 +691,7 @@
 		}
 	}
 
-	if (frame > (field + 350)) {
-		MBFrameToField(data);
-	}
-
-	return (frame > (field + 350));
+	return (frame >= (field + 350));
 }
@@ -642,26 +707,26 @@
 
 	/* left blocks */
 
-	/* 1=2, 2=4, 4=8, 8=1 */
+	// 1=2, 2=4, 4=8, 8=1
 	MOVLINE(tmp, LINE(0, 1));
 	MOVLINE(LINE(0, 1), LINE(0, 2));
 	MOVLINE(LINE(0, 2), LINE(0, 4));
 	MOVLINE(LINE(0, 4), LINE(2, 0));
 	MOVLINE(LINE(2, 0), tmp);
 
-	/* 3=6, 6=12, 12=9, 9=3 */
+	// 3=6, 6=12, 12=9, 9=3
 	MOVLINE(tmp, LINE(0, 3));
 	MOVLINE(LINE(0, 3), LINE(0, 6));
 	MOVLINE(LINE(0, 6), LINE(2, 4));
 	MOVLINE(LINE(2, 4), LINE(2, 1));
 	MOVLINE(LINE(2, 1), tmp);
 
-	/* 5=10, 10=5 */
+	// 5=10, 10=5
 	MOVLINE(tmp, LINE(0, 5));
 	MOVLINE(LINE(0, 5), LINE(2, 2));
 	MOVLINE(LINE(2, 2), tmp);
 
-	/* 7=14, 14=13, 13=11, 11=7 */
+	// 7=14, 14=13, 13=11, 11=7
 	MOVLINE(tmp, LINE(0, 7));
 	MOVLINE(LINE(0, 7), LINE(2, 6));
 	MOVLINE(LINE(2, 6), LINE(2, 5));
@@ -670,26 +735,26 @@
 
 	/* right blocks */
 
-	/* 1=2, 2=4, 4=8, 8=1 */
+	// 1=2, 2=4, 4=8, 8=1
 	MOVLINE(tmp, LINE(1, 1));
 	MOVLINE(LINE(1, 1), LINE(1, 2));
 	MOVLINE(LINE(1, 2), LINE(1, 4));
 	MOVLINE(LINE(1, 4), LINE(3, 0));
 	MOVLINE(LINE(3, 0), tmp);
 
-	/* 3=6, 6=12, 12=9, 9=3 */
+	// 3=6, 6=12, 12=9, 9=3
 	MOVLINE(tmp, LINE(1, 3));
 	MOVLINE(LINE(1, 3), LINE(1, 6));
 	MOVLINE(LINE(1, 6), LINE(3, 4));
 	MOVLINE(LINE(3, 4), LINE(3, 1));
 	MOVLINE(LINE(3, 1), tmp);
 
-	/* 5=10, 10=5 */
+	// 5=10, 10=5
 	MOVLINE(tmp, LINE(1, 5));
 	MOVLINE(LINE(1, 5), LINE(3, 2));
 	MOVLINE(LINE(3, 2), tmp);
 
-	/* 7=14, 14=13, 13=11, 11=7 */
+	// 7=14, 14=13, 13=11, 11=7
 	MOVLINE(tmp, LINE(1, 7));
 	MOVLINE(LINE(1, 7), LINE(3, 6));
 	MOVLINE(LINE(3, 6), LINE(3, 5));
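The XVID_REDUCED branches introduced above change only the block addressing: in reduced-resolution mode each 8x8 coefficient block maps onto a 16x16 pixel area, so a macroblock covers 32x32 luma and 16x16 chroma pixels, the macroblock offsets shift by 5/4 instead of 4/3, the horizontal step to the right-hand luma blocks becomes 16 instead of 8, and next_block becomes stride * 16. A minimal sketch of that addressing rule, assuming the usual XviD planar image layout; the struct and function names below are illustrative and not part of the file:

#include <stdint.h>

/* Illustrative helper (not in mbtransquant.c): compute a macroblock's
 * pixel offsets and block steps for normal and reduced-resolution mode. */
struct mb_base {
	uint32_t y_off;       /* offset of the MB's top-left luma pixel    */
	uint32_t uv_off;      /* offset of the MB's top-left chroma pixel  */
	uint32_t block_step;  /* horizontal step to the right luma blocks  */
	uint32_t next_block;  /* vertical step down to the lower luma row  */
};

static struct mb_base
mb_offsets(uint32_t x_pos, uint32_t y_pos,
           uint32_t stride, uint32_t stride2, int reduced)
{
	struct mb_base b;

	if (reduced) {
		/* every 8x8 block is upsampled to 16x16 pixels */
		b.y_off      = (y_pos << 5) * stride  + (x_pos << 5);
		b.uv_off     = (y_pos << 4) * stride2 + (x_pos << 4);
		b.block_step = 16;
		b.next_block = stride * 16;
	} else {
		b.y_off      = (y_pos << 4) * stride  + (x_pos << 4);
		b.uv_off     = (y_pos << 3) * stride2 + (x_pos << 3);
		b.block_step = 8;
		b.next_block = stride * 8;
	}
	return b;
}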
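The two blocks guarded by the "speedup: dont decode when encoding only ivops" comment skip dequantization, the inverse DCT and the copy back into the reconstructed image whenever the encoder is configured for key frames only (iMaxKeyInterval == 1) with no B-frames, since the reconstructed picture is then never used as a prediction reference. The guard restated as a hedged sketch; needs_reconstruction is an illustrative name:

/* Illustrative restatement of the guard used in the patch: reconstruction
 * (dequant + idct + copy/add back) is only needed when a later frame may
 * be predicted from this one. */
static int
needs_reconstruction(int iMaxKeyInterval, int max_bframes)
{
	/* "only I-VOPs" means every frame is intra and no B-frames exist,
	 * so nothing ever references the reconstructed image */
	return (iMaxKeyInterval != 1) || (max_bframes > 0);
}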
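MBTransQuantInter() also retunes the block-skip test: TOOSMALL_LIMIT drops from 3 to 1, the limit is raised by one when quant == 1 (increase_limit), and a block is always coded when its DC coefficient or one of its first two AC coefficients survives quantization. A self-contained sketch of that decision, assuming the file's 64-coefficient block layout and taking "sum" to be the significance measure returned by the quantizer for the block; block_is_coded is an illustrative name:

#include <stdint.h>

#define TOOSMALL_LIMIT 1	/* skip blocks having a coefficient sum below this value */

/* Illustrative version of the inter-block coded/skipped decision:
 * qcoeff_block points at the block's 64 quantized coefficients. */
static int
block_is_coded(const int16_t *qcoeff_block, uint32_t sum, uint32_t quant)
{
	uint32_t increase_limit = (quant == 1) ? 1 : 0;

	return (sum >= TOOSMALL_LIMIT + increase_limit) ||
	       (qcoeff_block[0] != 0) ||  /* DC                  */
	       (qcoeff_block[1] != 0) ||  /* first horizontal AC */
	       (qcoeff_block[8] != 0);    /* first vertical AC   */
}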
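The interlacing decision is reorganised as well: MBDecideFieldDCT() now calls the measurement through the new MBFieldTest pointer (MBFIELDTEST_PTR, so an optimised implementation can sit next to the C version MBFieldTest_c) and performs the MBFrameToField() shuffle itself, and the comparison becomes "frame >= field + 350". The underlying idea, as the retained comment says, is to pick the field DCT when lines of the same field resemble each other more than adjacent frame lines do. A simplified stand-alone version of that decision, working on a plain 16x16 luma block instead of the file's 6x64 block layout; field_dct_decision is an illustrative name:

#include <stdint.h>
#include <stdlib.h>

/* Simplified illustration of the frame/field DCT choice: sum the absolute
 * differences between vertically adjacent frame rows and between rows of
 * the same field (two apart), and choose the field DCT only when it wins
 * by the fixed bias of 350 used in the file. */
static int
field_dct_decision(int16_t mb[16][16])
{
	uint32_t frame = 0, field = 0;
	int y, x;

	for (y = 0; y < 14; y++) {
		for (x = 0; x < 16; x++) {
			frame += abs(mb[y + 1][x] - mb[y][x]); /* adjacent frame rows */
			field += abs(mb[y + 2][x] - mb[y][x]); /* rows of one field   */
		}
	}
	return frame >= field + 350; /* nonzero -> permute rows, use field DCT */
}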
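Finally, the MOVLINE cycles at the end of the diff ("1=2, 2=4, 4=8, 8=1", "3=6, 6=12, 12=9, 9=3", and so on) are the in-place form of the frame-to-field row permutation: the 16 interleaved luma rows of the macroblock are regrouped so that the even rows (top field) occupy the upper blocks and the odd rows (bottom field) the lower ones. The same permutation written out of place with a temporary buffer, which is easier to read than the cycles; frame_to_field_rows is an illustrative name and it ignores the 6x64 block storage used in the file:

#include <stdint.h>
#include <string.h>

/* Out-of-place illustration of what MBFrameToField() achieves, viewing the
 * macroblock as 16 rows of 16 samples: rows[0..7] receive the top field,
 * rows[8..15] the bottom field. */
static void
frame_to_field_rows(int16_t rows[16][16])
{
	int16_t tmp[16][16];
	int i;

	for (i = 0; i < 8; i++) {
		memcpy(tmp[i],     rows[2 * i],     sizeof(rows[0])); /* even rows -> top field    */
		memcpy(tmp[i + 8], rows[2 * i + 1], sizeof(rows[0])); /* odd rows  -> bottom field */
	}
	memcpy(rows, tmp, sizeof(tmp));
}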