--- trunk/xvidcore/src/image/image.c	2008/11/26 01:04:34	1795 +++ trunk/xvidcore/src/image/image.c	2011/03/08 19:18:44	1959 @@ -3,7 +3,7 @@ * XVID MPEG-4 VIDEO CODEC * - Image management functions - * - * Copyright(C) 2001-2004 Peter Ross + * Copyright(C) 2001-2010 Peter Ross * * This program is free software ; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,7 +19,7 @@ * along with this program ; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * - * $Id: image.c,v 1.42 2008-11-26 01:04:34 Isibaar Exp $ + * $Id: image.c,v 1.48 2011-03-08 19:16:32 Isibaar Exp $ * ****************************************************************************/ @@ -127,8 +127,10 @@ memcpy(image1->v, image2->v, edged_width * height / 4); } -/* setedges bug was fixed in this BS version */ +/* setedges bug was present in these BS versions */ #define SETEDGES_BUG_BEFORE 18 +#define SETEDGES_BUG_AFTER 57 +#define SETEDGES_BUG_REFIXED 63 void image_setedges(IMAGE * image, @@ -149,7 +151,9 @@ /* According to the Standard Clause 7.6.4, padding is done starting at 16 * pixel width and height multiples. 
This was not respected in old xvids */ - if (bs_version == 0 || bs_version >= SETEDGES_BUG_BEFORE) { + if (bs_version >= SETEDGES_BUG_BEFORE && + bs_version < SETEDGES_BUG_AFTER || + bs_version >= SETEDGES_BUG_REFIXED) { width = (width+15)&~15; height = (height+15)&~15; } @@ -385,30 +389,46 @@ uint8_t * y_ptr, uint8_t * u_ptr, uint8_t * v_ptr, int y_stride, int uv_stride, int width, int height, int vflip, - packedFunc * func_opt, packedFunc func_c, int size) + packedFunc * func_opt, packedFunc func_c, + int size, int interlacing) { - int width_opt, width_c; + int width_opt, width_c, height_opt; + + if (width<0 || width==1 || height==1) return; /* forget about it */ if (func_opt != func_c && x_stride < size*((width+15)/16)*16) { width_opt = width & (~15); - width_c = width - width_opt; + width_c = (width - width_opt) & (~1); } - else + else if (func_opt != func_c && !(width&1) && (size==3)) { - width_opt = width; + /* MMX reads 4 bytes per pixel for RGB/BGR */ + width_opt = width - 2; + width_c = 2; + } + else { + /* Enforce the width to be divisible by two. 
*/ + width_opt = width & (~1); width_c = 0; } + /* packed conversions require height to be divisible by 2 + (or even by 4 for interlaced conversion) */ + if (interlacing) + height_opt = height & (~3); + else + height_opt = height & (~1); + func_opt(x_ptr, x_stride, y_ptr, u_ptr, v_ptr, y_stride, uv_stride, - width_opt, height, vflip); + width_opt, height_opt, vflip); if (width_c) { func_c(x_ptr + size*width_opt, x_stride, y_ptr + width_opt, u_ptr + width_opt/2, v_ptr + width_opt/2, - y_stride, uv_stride, width_c, height, vflip); + y_stride, uv_stride, width_c, height_opt, vflip); } } @@ -437,7 +457,7 @@ src[0], src_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?rgb555i_to_yv12 :rgb555_to_yv12, - interlacing?rgb555i_to_yv12_c:rgb555_to_yv12_c, 2); + interlacing?rgb555i_to_yv12_c:rgb555_to_yv12_c, 2, interlacing); break; case XVID_CSP_RGB565: @@ -445,7 +465,7 @@ src[0], src_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?rgb565i_to_yv12 :rgb565_to_yv12, - interlacing?rgb565i_to_yv12_c:rgb565_to_yv12_c, 2); + interlacing?rgb565i_to_yv12_c:rgb565_to_yv12_c, 2, interlacing); break; @@ -454,7 +474,7 @@ src[0], src_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?bgri_to_yv12 :bgr_to_yv12, - interlacing?bgri_to_yv12_c:bgr_to_yv12_c, 3); + interlacing?bgri_to_yv12_c:bgr_to_yv12_c, 3, interlacing); break; case XVID_CSP_BGRA: @@ -462,7 +482,7 @@ src[0], src_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?bgrai_to_yv12 :bgra_to_yv12, - interlacing?bgrai_to_yv12_c:bgra_to_yv12_c, 4); + interlacing?bgrai_to_yv12_c:bgra_to_yv12_c, 4, interlacing); break; case XVID_CSP_ABGR : @@ -470,7 +490,7 @@ src[0], src_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & 
XVID_CSP_VFLIP), interlacing?abgri_to_yv12 :abgr_to_yv12, - interlacing?abgri_to_yv12_c:abgr_to_yv12_c, 4); + interlacing?abgri_to_yv12_c:abgr_to_yv12_c, 4, interlacing); break; case XVID_CSP_RGB: @@ -478,7 +498,7 @@ src[0], src_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?rgbi_to_yv12 :rgb_to_yv12, - interlacing?rgbi_to_yv12_c:rgb_to_yv12_c, 3); + interlacing?rgbi_to_yv12_c:rgb_to_yv12_c, 3, interlacing); break; case XVID_CSP_RGBA : @@ -486,7 +506,7 @@ src[0], src_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?rgbai_to_yv12 :rgba_to_yv12, - interlacing?rgbai_to_yv12_c:rgba_to_yv12_c, 4); + interlacing?rgbai_to_yv12_c:rgba_to_yv12_c, 4, interlacing); break; case XVID_CSP_ARGB: @@ -494,7 +514,7 @@ src[0], src_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?argbi_to_yv12 : argb_to_yv12, - interlacing?argbi_to_yv12_c: argb_to_yv12_c, 4); + interlacing?argbi_to_yv12_c: argb_to_yv12_c, 4, interlacing); break; case XVID_CSP_YUY2: @@ -502,7 +522,7 @@ src[0], src_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yuyvi_to_yv12 :yuyv_to_yv12, - interlacing?yuyvi_to_yv12_c:yuyv_to_yv12_c, 2); + interlacing?yuyvi_to_yv12_c:yuyv_to_yv12_c, 2, interlacing); break; case XVID_CSP_YVYU: /* u/v swapped */ @@ -510,7 +530,7 @@ src[0], src_stride[0], image->y, image->v, image->u, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yuyvi_to_yv12 :yuyv_to_yv12, - interlacing?yuyvi_to_yv12_c:yuyv_to_yv12_c, 2); + interlacing?yuyvi_to_yv12_c:yuyv_to_yv12_c, 2, interlacing); break; case XVID_CSP_UYVY: @@ -518,7 +538,7 @@ src[0], src_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?uyvyi_to_yv12 :uyvy_to_yv12, - 
interlacing?uyvyi_to_yv12_c:uyvy_to_yv12_c, 2); + interlacing?uyvyi_to_yv12_c:uyvy_to_yv12_c, 2, interlacing); break; case XVID_CSP_I420: /* YCbCr == YUV == internal colorspace for MPEG */ @@ -622,7 +642,7 @@ dst[0], dst_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yv12_to_rgb555i :yv12_to_rgb555, - interlacing?yv12_to_rgb555i_c:yv12_to_rgb555_c, 2); + interlacing?yv12_to_rgb555i_c:yv12_to_rgb555_c, 2, interlacing); return 0; case XVID_CSP_RGB565: @@ -630,7 +650,7 @@ dst[0], dst_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yv12_to_rgb565i :yv12_to_rgb565, - interlacing?yv12_to_rgb565i_c:yv12_to_rgb565_c, 2); + interlacing?yv12_to_rgb565i_c:yv12_to_rgb565_c, 2, interlacing); return 0; case XVID_CSP_BGR: @@ -638,7 +658,7 @@ dst[0], dst_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yv12_to_bgri :yv12_to_bgr, - interlacing?yv12_to_bgri_c:yv12_to_bgr_c, 3); + interlacing?yv12_to_bgri_c:yv12_to_bgr_c, 3, interlacing); return 0; case XVID_CSP_BGRA: @@ -646,7 +666,7 @@ dst[0], dst_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yv12_to_bgrai :yv12_to_bgra, - interlacing?yv12_to_bgrai_c:yv12_to_bgra_c, 4); + interlacing?yv12_to_bgrai_c:yv12_to_bgra_c, 4, interlacing); return 0; case XVID_CSP_ABGR: @@ -654,7 +674,7 @@ dst[0], dst_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yv12_to_abgri :yv12_to_abgr, - interlacing?yv12_to_abgri_c:yv12_to_abgr_c, 4); + interlacing?yv12_to_abgri_c:yv12_to_abgr_c, 4, interlacing); return 0; case XVID_CSP_RGB: @@ -662,7 +682,7 @@ dst[0], dst_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yv12_to_rgbi :yv12_to_rgb, 
- interlacing?yv12_to_rgbi_c:yv12_to_rgb_c, 3); + interlacing?yv12_to_rgbi_c:yv12_to_rgb_c, 3, interlacing); return 0; case XVID_CSP_RGBA: @@ -670,7 +690,7 @@ dst[0], dst_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yv12_to_rgbai :yv12_to_rgba, - interlacing?yv12_to_rgbai_c:yv12_to_rgba_c, 4); + interlacing?yv12_to_rgbai_c:yv12_to_rgba_c, 4, interlacing); return 0; case XVID_CSP_ARGB: @@ -678,7 +698,7 @@ dst[0], dst_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yv12_to_argbi :yv12_to_argb, - interlacing?yv12_to_argbi_c:yv12_to_argb_c, 4); + interlacing?yv12_to_argbi_c:yv12_to_argb_c, 4, interlacing); return 0; case XVID_CSP_YUY2: @@ -686,7 +706,7 @@ dst[0], dst_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yv12_to_yuyvi :yv12_to_yuyv, - interlacing?yv12_to_yuyvi_c:yv12_to_yuyv_c, 2); + interlacing?yv12_to_yuyvi_c:yv12_to_yuyv_c, 2, interlacing); return 0; case XVID_CSP_YVYU: /* u,v swapped */ @@ -694,7 +714,7 @@ dst[0], dst_stride[0], image->y, image->v, image->u, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yv12_to_yuyvi :yv12_to_yuyv, - interlacing?yv12_to_yuyvi_c:yv12_to_yuyv_c, 2); + interlacing?yv12_to_yuyvi_c:yv12_to_yuyv_c, 2, interlacing); return 0; case XVID_CSP_UYVY: @@ -702,7 +722,7 @@ dst[0], dst_stride[0], image->y, image->u, image->v, edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP), interlacing?yv12_to_uyvyi :yv12_to_uyvy, - interlacing?yv12_to_uyvyi_c:yv12_to_uyvy_c, 2); + interlacing?yv12_to_uyvyi_c:yv12_to_uyvy_c, 2, interlacing); return 0; case XVID_CSP_I420: /* YCbCr == YUV == internal colorspace for MPEG */ @@ -846,6 +866,98 @@ return (sse); } +void image_block_variance(IMAGE * orig_image, + uint16_t stride, + MACROBLOCK *mbs, + uint16_t mb_width, + uint16_t mb_height) +{ + 
DECLARE_ALIGNED_MATRIX(sums, 1, 4, uint16_t, CACHE_LINE); + DECLARE_ALIGNED_MATRIX(squares, 1, 4, uint32_t, CACHE_LINE); + + int x, y, i, j; + uint8_t *orig_y = orig_image->y; + uint8_t *orig_u = orig_image->u; + uint8_t *orig_v = orig_image->v; + + for (y = 0; y < mb_height; y++) { + for (x = 0; x < mb_width; x++) { + MACROBLOCK *pMB = &mbs[x + y * mb_width]; + uint32_t var4[4]; + uint32_t sum = 0, square = 0; + + /* y-blocks */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 2; i++) { + int lsum = blocksum8(orig_y + ((y<<4) + (j<<3))*stride + (x<<4) + (i<<3), + stride, sums, squares); + int lsquare = (squares[0] + squares[1] + squares[2] + squares[3])<<6; + + sum += lsum; + square += lsquare; + + var4[0] = (squares[0]<<4) - sums[0]*sums[0]; + var4[1] = (squares[1]<<4) - sums[1]*sums[1]; + var4[2] = (squares[2]<<4) - sums[2]*sums[2]; + var4[3] = (squares[3]<<4) - sums[3]*sums[3]; + + pMB->rel_var8[j*2 + i] = lsquare - lsum*lsum; + if (pMB->rel_var8[j*2 + i]) + pMB->rel_var8[j*2 + i] = ((var4[0] + var4[1] + var4[2] + var4[3])<<8) / + pMB->rel_var8[j*2 + i]; /* 4*(Var(Di)/Var(D)) */ + else + pMB->rel_var8[j*2 + i] = 64; + } + } + + /* u */ + { + int lsum = blocksum8(orig_u + (y<<3)*(stride>>1) + (x<<3), + stride, sums, squares); + int lsquare = (squares[0] + squares[1] + squares[2] + squares[3])<<6; + + sum += lsum; + square += lsquare; + + var4[0] = (squares[0]<<4) - sums[0]*sums[0]; + var4[1] = (squares[1]<<4) - sums[1]*sums[1]; + var4[2] = (squares[2]<<4) - sums[2]*sums[2]; + var4[3] = (squares[3]<<4) - sums[3]*sums[3]; + + pMB->rel_var8[4] = lsquare - lsum*lsum; + if (pMB->rel_var8[4]) + pMB->rel_var8[4] = ((var4[0] + var4[1] + var4[2] + var4[3])<<8) / + pMB->rel_var8[4]; /* 4*(Var(Di)/Var(D)) */ + else + pMB->rel_var8[4] = 64; + } + + /* v */ + { + int lsum = blocksum8(orig_v + (y<<3)*(stride>>1) + (x<<3), + stride, sums, squares); + int lsquare = (squares[0] + squares[1] + squares[2] + squares[3])<<6; + + sum += lsum; + square += lsquare; + + var4[0] = 
(squares[0]<<4) - sums[0]*sums[0]; + var4[1] = (squares[1]<<4) - sums[1]*sums[1]; + var4[2] = (squares[2]<<4) - sums[2]*sums[2]; + var4[3] = (squares[3]<<4) - sums[3]*sums[3]; + + pMB->rel_var8[5] = lsquare - lsum*lsum; + if (pMB->rel_var8[5]) + pMB->rel_var8[5] = ((var4[0] + var4[1] + var4[2] + var4[3])<<8) / + pMB->rel_var8[5]; /* 4*(Var(Di)/Var(D)) */ + else + pMB->rel_var8[5] = 64; + } + + } + } +} + #if 0 #include