[svn] / trunk / xvidcore / src / xvid.c

Diff of /trunk/xvidcore/src/xvid.c


revision 311, Thu Jul 18 13:47:46 2002 UTC  →  revision 1764, Wed Dec 6 19:55:07 2006 UTC
# Line 3  Line 3 
3   *  XVID MPEG-4 VIDEO CODEC   *  XVID MPEG-4 VIDEO CODEC
4   *  - Native API implementation  -   *  - Native API implementation  -
5   *   *
6   *  This program is an implementation of a part of one or more MPEG-4   *  Copyright(C) 2001-2004 Peter Ross <pross@xvid.org>
  *  Video tools as specified in ISO/IEC 14496-2 standard.  Those intending  
  *  to use this software module in hardware or software products are  
  *  advised that its use may infringe existing patents or copyrights, and  
  *  any such use would be at such party's own risk.  The original  
  *  developer of this software module and his/her company, and subsequent  
  *  editors and their companies, will have no liability for use of this  
  *  software or modifications or derivatives thereof.  
7   *   *
8   *  This program is free software ; you can redistribute it and/or modify   *  This program is free software ; you can redistribute it and/or modify
9   *  it under the terms of the GNU General Public License as published by   *  it under the terms of the GNU General Public License as published by
# Line 26  Line 19 
19   *  along with this program ; if not, write to the Free Software   *  along with this program ; if not, write to the Free Software
20   *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA   *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
21   *   *
22   ****************************************************************************/   * $Id: xvid.c,v 1.73 2006-12-06 19:55:07 Isibaar Exp $
   
 /*****************************************************************************  
  *  
  *  History  
  *  
  *      - 23.06.2002    added XVID_CPU_CHKONLY  
  *  - 17.03.2002        Added interpolate8x8_halfpel_hv_xmm  
  *  - 22.12.2001  API change: added xvid_init() - Isibaar  
  *  - 16.12.2001        initial version; (c)2001 peter ross <pross@cs.rmit.edu.au>  
  *  
  *  $Id: xvid.c,v 1.31 2002-07-18 13:47:46 suxen_drol Exp $  
23   *   *
24   ****************************************************************************/   ****************************************************************************/
25    
26    #include <stdio.h>
27    #include <stdlib.h>
28    #include <string.h>
29    #include <time.h>
30    
31  #include "xvid.h"  #include "xvid.h"
32  #include "decoder.h"  #include "decoder.h"
33  #include "encoder.h"  #include "encoder.h"
# Line 50  Line 37 
37  #include "image/colorspace.h"  #include "image/colorspace.h"
38  #include "image/interpolate8x8.h"  #include "image/interpolate8x8.h"
39  #include "utils/mem_transfer.h"  #include "utils/mem_transfer.h"
40  #include "quant/quant_h263.h"  #include "utils/mbfunctions.h"
41  #include "quant/quant_mpeg4.h"  #include "quant/quant.h"
42  #include "motion/motion.h"  #include "motion/motion.h"
43    #include "motion/gmc.h"
44  #include "motion/sad.h"  #include "motion/sad.h"
45  #include "utils/emms.h"  #include "utils/emms.h"
46  #include "utils/timer.h"  #include "utils/timer.h"
47  #include "bitstream/mbcoding.h"  #include "bitstream/mbcoding.h"
48    #include "image/qpel.h"
49    #include "image/postprocessing.h"
50    
51  #if defined(ARCH_X86) && defined(EXPERIMENTAL_SSE2_CODE)  #if defined(_DEBUG)
52    unsigned int xvid_debug = 0; /* xvid debug mask */
53    #endif
54    
55  #ifdef WIN32  #if (defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64)) && defined(_MSC_VER)
56  #include <windows.h>  #include <windows.h>
57  #else  #elif defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64) || defined(ARCH_IS_PPC)
58  #include <signal.h>  #include <signal.h>
59  #include <setjmp.h>  #include <setjmp.h>
 #endif  
   
   
 #ifndef WIN32  
60    
61  static jmp_buf mark;  static jmp_buf mark;
62    
# Line 81  Line 69 
69    
70    
71  /*  /*
72  calls the funcptr, and returns whether SIGILL (illegal instruction) was signalled   * Calls the funcptr, and returns whether SIGILL (illegal instruction) was
73  return values:   * signalled
74  -1 : could not determine   *
75  0  : SIGILL was *not* signalled   * Return values:
76  1  : SIGILL was signalled   *  -1 : could not determine
77     *   0 : SIGILL was *not* signalled
78     *   1 : SIGILL was signalled
79  */  */
80    #if (defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64)) && defined(_MSC_VER)
81  int  static int
82  sigill_check(void (*func)())  sigill_check(void (*func)())
83  {  {
 #ifdef WIN32  
84          _try {          _try {
85                  func();                  func();
86          }          } _except(EXCEPTION_EXECUTE_HANDLER) {
         _except(EXCEPTION_EXECUTE_HANDLER) {  
87    
88                  if (_exception_code() == STATUS_ILLEGAL_INSTRUCTION)                  if (_exception_code() == STATUS_ILLEGAL_INSTRUCTION)
89                          return 1;                          return(1);
90          }          }
91          return 0;          return(0);
92  #else  }
93    #elif defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64) || defined(ARCH_IS_PPC)
94    static int
95    sigill_check(void (*func)())
96    {
97      void * old_handler;      void * old_handler;
98      int jmpret;      int jmpret;
99    
100        /* Set our SIGILL handler */
101      old_handler = signal(SIGILL, sigill_handler);      old_handler = signal(SIGILL, sigill_handler);
102      if (old_handler == SIG_ERR)  
103      {      /* Check for error */
104          return -1;      if (old_handler == SIG_ERR) {
105            return(-1);
106      }      }
107    
108        /* Save stack context, so if func triggers a SIGILL, we can still roll
109             * back to a valid CPU state */
110      jmpret = setjmp(mark);      jmpret = setjmp(mark);
111      if (jmpret == 0)  
112      {          /* If setjmp returned directly, then its returned value is 0, and we still
113             * have to test the passed func. Otherwise it means the stack context has
114             * been restored by a longjmp() call, which in our case happens only in the
115             * signal handler */
116        if (jmpret == 0) {
117          func();          func();
118      }      }
119    
120        /* Restore old signal handler */
121      signal(SIGILL, old_handler);      signal(SIGILL, old_handler);
122    
123      return jmpret;      return(jmpret);
 #endif  
124  }  }
125  #endif  #endif
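    /* Editor's note (not part of the original file): the *_os_trigger helpers
     * used by detect_cpu_flags() below live in the assembly sources; each one
     * simply executes a single instruction of the probed feature set so that
     * sigill_check() can tell whether the OS delivers SIGILL for it. A
     * hypothetical GCC-style sketch of such a trigger, for illustration only: */
    #if defined(__GNUC__) && (defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64))
    static void sse_os_trigger_sketch(void)
    {
            /* xorps is an SSE instruction: it faults with SIGILL when the OS
             * has not enabled SSE state handling for this process */
            __asm__ __volatile__ ("xorps %xmm0, %xmm0");
    }
    #endif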
126    
127    
128    /* detect cpu flags  */
129    static unsigned int
130    detect_cpu_flags(void)
131    {
132            /* enable native assembly optimizations by default */
133            unsigned int cpu_flags = XVID_CPU_ASM;
134    
135    #if defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64)
136            cpu_flags |= check_cpu_features();
137            if ((cpu_flags & XVID_CPU_SSE) && sigill_check(sse_os_trigger))
138                    cpu_flags &= ~XVID_CPU_SSE;
139    
140            if ((cpu_flags & (XVID_CPU_SSE2|XVID_CPU_SSE3)) && sigill_check(sse2_os_trigger))
141                    cpu_flags &= ~(XVID_CPU_SSE2|XVID_CPU_SSE3);
142    #endif
143    
144    #if defined(ARCH_IS_PPC)
145            if (!sigill_check(altivec_trigger))
146                    cpu_flags |= XVID_CPU_ALTIVEC;
147    #endif
148    
149            return cpu_flags;
150    }
151    
152    
153  /*****************************************************************************  /*****************************************************************************
154   * XviD Init Entry point   * XviD Init Entry point
155   *   *
# Line 139  Line 164 
164   *   *
165   ****************************************************************************/   ****************************************************************************/
166    
 int  
 xvid_init(void *handle,  
                   int opt,  
                   void *param1,  
                   void *param2)  
 {  
         int cpu_flags;  
         XVID_INIT_PARAM *init_param;  
   
         init_param = (XVID_INIT_PARAM *) param1;  
   
         /* Inform the client the API version */  
         init_param->api_version = API_VERSION;  
   
         /* Inform the client the core build - unused because we're still alpha */  
         init_param->core_build = 1000;  
   
         /* Do we have to force CPU features  ? */  
         if ((init_param->cpu_flags & XVID_CPU_FORCE)) {  
167    
168                  cpu_flags = init_param->cpu_flags;  static
169    int xvid_gbl_init(xvid_gbl_init_t * init)
         } else {  
   
                 cpu_flags = check_cpu_features();  
   
 #if defined(ARCH_X86) && defined(EXPERIMENTAL_SSE2_CODE)  
                 if ((cpu_flags & XVID_CPU_SSE) && sigill_check(sse_os_trigger))  
                         cpu_flags &= ~XVID_CPU_SSE;  
   
                 if ((cpu_flags & XVID_CPU_SSE2) && sigill_check(sse2_os_trigger))  
                         cpu_flags &= ~XVID_CPU_SSE2;  
 #endif  
         }  
   
         if ((init_param->cpu_flags & XVID_CPU_CHKONLY))  
170          {          {
171                  init_param->cpu_flags = cpu_flags;          unsigned int cpu_flags;
                 return XVID_ERR_OK;  
         }  
172    
173          init_param->cpu_flags = cpu_flags;          if (XVID_VERSION_MAJOR(init->version) != 1) /* v1.x.x */
174                    return XVID_ERR_VERSION;
175    
176            cpu_flags = (init->cpu_flags & XVID_CPU_FORCE) ? init->cpu_flags : detect_cpu_flags();
177    
178          /* Initialize the function pointers */          /* Initialize the function pointers */
179          idct_int32_init();          idct_int32_init();
# Line 192  Line 184 
184          idct = idct_int32;          idct = idct_int32;
185    
186          /* Only needed on PPC Altivec archs */          /* Only needed on PPC Altivec archs */
187          sadInit = 0;          sadInit = NULL;
188    
189          /* Restore FPU context : emms_c is a nop function */          /* Restore FPU context : emms_c is a nop function */
190          emms = emms_c;          emms = emms_c;
191    
192            /* Qpel stuff */
193            xvid_QP_Funcs = &xvid_QP_Funcs_C;
194            xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_C;
195            xvid_Init_QP();
196    
197          /* Quantization functions */          /* Quantization functions */
198          quant_intra   = quant_intra_c;          quant_h263_intra   = quant_h263_intra_c;
199          dequant_intra = dequant_intra_c;          quant_h263_inter   = quant_h263_inter_c;
200          quant_inter   = quant_inter_c;          dequant_h263_intra = dequant_h263_intra_c;
201          dequant_inter = dequant_inter_c;          dequant_h263_inter = dequant_h263_inter_c;
202    
203          quant4_intra   = quant4_intra_c;          quant_mpeg_intra   = quant_mpeg_intra_c;
204          dequant4_intra = dequant4_intra_c;          quant_mpeg_inter   = quant_mpeg_inter_c;
205          quant4_inter   = quant4_inter_c;          dequant_mpeg_intra = dequant_mpeg_intra_c;
206          dequant4_inter = dequant4_inter_c;          dequant_mpeg_inter = dequant_mpeg_inter_c;
207    
208          /* Block transfer related functions */          /* Block transfer related functions */
209          transfer_8to16copy = transfer_8to16copy_c;          transfer_8to16copy = transfer_8to16copy_c;
210          transfer_16to8copy = transfer_16to8copy_c;          transfer_16to8copy = transfer_16to8copy_c;
211          transfer_8to16sub  = transfer_8to16sub_c;          transfer_8to16sub  = transfer_8to16sub_c;
212            transfer_8to16subro  = transfer_8to16subro_c;
213          transfer_8to16sub2 = transfer_8to16sub2_c;          transfer_8to16sub2 = transfer_8to16sub2_c;
214            transfer_8to16sub2ro = transfer_8to16sub2ro_c;
215          transfer_16to8add  = transfer_16to8add_c;          transfer_16to8add  = transfer_16to8add_c;
216          transfer8x8_copy   = transfer8x8_copy_c;          transfer8x8_copy   = transfer8x8_copy_c;
217            transfer8x4_copy   = transfer8x4_copy_c;
218    
219            /* Interlacing functions */
220            MBFieldTest = MBFieldTest_c;
221    
222          /* Image interpolation related functions */          /* Image interpolation related functions */
223          interpolate8x8_halfpel_h  = interpolate8x8_halfpel_h_c;          interpolate8x8_halfpel_h  = interpolate8x8_halfpel_h_c;
224          interpolate8x8_halfpel_v  = interpolate8x8_halfpel_v_c;          interpolate8x8_halfpel_v  = interpolate8x8_halfpel_v_c;
225          interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_c;          interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_c;
226    
227            interpolate8x4_halfpel_h  = interpolate8x4_halfpel_h_c;
228            interpolate8x4_halfpel_v  = interpolate8x4_halfpel_v_c;
229            interpolate8x4_halfpel_hv = interpolate8x4_halfpel_hv_c;
230    
231            interpolate8x8_halfpel_add = interpolate8x8_halfpel_add_c;
232            interpolate8x8_halfpel_h_add = interpolate8x8_halfpel_h_add_c;
233            interpolate8x8_halfpel_v_add = interpolate8x8_halfpel_v_add_c;
234            interpolate8x8_halfpel_hv_add = interpolate8x8_halfpel_hv_add_c;
235    
236            interpolate16x16_lowpass_h = interpolate16x16_lowpass_h_c;
237            interpolate16x16_lowpass_v = interpolate16x16_lowpass_v_c;
238            interpolate16x16_lowpass_hv = interpolate16x16_lowpass_hv_c;
239    
240            interpolate8x8_lowpass_h = interpolate8x8_lowpass_h_c;
241            interpolate8x8_lowpass_v = interpolate8x8_lowpass_v_c;
242            interpolate8x8_lowpass_hv = interpolate8x8_lowpass_hv_c;
243    
244            interpolate8x8_6tap_lowpass_h = interpolate8x8_6tap_lowpass_h_c;
245            interpolate8x8_6tap_lowpass_v = interpolate8x8_6tap_lowpass_v_c;
246    
247            interpolate8x8_avg2 = interpolate8x8_avg2_c;
248            interpolate8x8_avg4 = interpolate8x8_avg4_c;
249    
250            /* postprocessing */
251            image_brightness = image_brightness_c;
252    
253          /* Initialize internal colorspace transformation tables */          /* Initialize internal colorspace transformation tables */
254          colorspace_init();          colorspace_init();
255    
256          /* All colorspace transformation functions User Format->YV12 */          /* All colorspace transformation functions User Format->YV12 */
257            yv12_to_yv12    = yv12_to_yv12_c;
258          rgb555_to_yv12 = rgb555_to_yv12_c;          rgb555_to_yv12 = rgb555_to_yv12_c;
259          rgb565_to_yv12 = rgb565_to_yv12_c;          rgb565_to_yv12 = rgb565_to_yv12_c;
260          rgb24_to_yv12  = rgb24_to_yv12_c;          rgb_to_yv12     = rgb_to_yv12_c;
261          rgb32_to_yv12  = rgb32_to_yv12_c;          bgr_to_yv12     = bgr_to_yv12_c;
262          yuv_to_yv12    = yuv_to_yv12_c;          bgra_to_yv12    = bgra_to_yv12_c;
263            abgr_to_yv12    = abgr_to_yv12_c;
264            rgba_to_yv12    = rgba_to_yv12_c;
265            argb_to_yv12    = argb_to_yv12_c;
266          yuyv_to_yv12   = yuyv_to_yv12_c;          yuyv_to_yv12   = yuyv_to_yv12_c;
267          uyvy_to_yv12   = uyvy_to_yv12_c;          uyvy_to_yv12   = uyvy_to_yv12_c;
268    
269            rgb555i_to_yv12 = rgb555i_to_yv12_c;
270            rgb565i_to_yv12 = rgb565i_to_yv12_c;
271            bgri_to_yv12    = bgri_to_yv12_c;
272            bgrai_to_yv12   = bgrai_to_yv12_c;
273            abgri_to_yv12   = abgri_to_yv12_c;
274            rgbai_to_yv12   = rgbai_to_yv12_c;
275            argbi_to_yv12   = argbi_to_yv12_c;
276            yuyvi_to_yv12   = yuyvi_to_yv12_c;
277            uyvyi_to_yv12   = uyvyi_to_yv12_c;
278    
279          /* All colorspace transformation functions YV12->User format */          /* All colorspace transformation functions YV12->User format */
280          yv12_to_rgb555 = yv12_to_rgb555_c;          yv12_to_rgb555 = yv12_to_rgb555_c;
281          yv12_to_rgb565 = yv12_to_rgb565_c;          yv12_to_rgb565 = yv12_to_rgb565_c;
282          yv12_to_rgb24  = yv12_to_rgb24_c;          yv12_to_rgb     = yv12_to_rgb_c;
283          yv12_to_rgb32  = yv12_to_rgb32_c;          yv12_to_bgr     = yv12_to_bgr_c;
284          yv12_to_yuv    = yv12_to_yuv_c;          yv12_to_bgra    = yv12_to_bgra_c;
285            yv12_to_abgr    = yv12_to_abgr_c;
286            yv12_to_rgba    = yv12_to_rgba_c;
287            yv12_to_argb    = yv12_to_argb_c;
288          yv12_to_yuyv   = yv12_to_yuyv_c;          yv12_to_yuyv   = yv12_to_yuyv_c;
289          yv12_to_uyvy   = yv12_to_uyvy_c;          yv12_to_uyvy   = yv12_to_uyvy_c;
290    
291            yv12_to_rgb555i = yv12_to_rgb555i_c;
292            yv12_to_rgb565i = yv12_to_rgb565i_c;
293            yv12_to_bgri    = yv12_to_bgri_c;
294            yv12_to_bgrai   = yv12_to_bgrai_c;
295            yv12_to_abgri   = yv12_to_abgri_c;
296            yv12_to_rgbai   = yv12_to_rgbai_c;
297            yv12_to_argbi   = yv12_to_argbi_c;
298            yv12_to_yuyvi   = yv12_to_yuyvi_c;
299            yv12_to_uyvyi   = yv12_to_uyvyi_c;
300    
301          /* Functions used in motion estimation algorithms */          /* Functions used in motion estimation algorithms */
302          calc_cbp = calc_cbp_c;          calc_cbp = calc_cbp_c;
303          sad16    = sad16_c;          sad16    = sad16_c;
         sad16bi  = sad16bi_c;  
304          sad8     = sad8_c;          sad8     = sad8_c;
305            sad16bi    = sad16bi_c;
306            sad8bi     = sad8bi_c;
307          dev16    = dev16_c;          dev16    = dev16_c;
308          Halfpel8_Refine = Halfpel8_Refine_c;          sad16v     = sad16v_c;
309            sse8_16bit = sse8_16bit_c;
310            sse8_8bit  = sse8_8bit_c;
311    
312            init_GMC(cpu_flags);
313    
314  #ifdef ARCH_X86  #if defined(ARCH_IS_IA32)
315          if ((cpu_flags & XVID_CPU_MMX) > 0) {  
316            if ((cpu_flags & XVID_CPU_MMX) || (cpu_flags & XVID_CPU_MMXEXT) ||
317                    (cpu_flags & XVID_CPU_3DNOW) || (cpu_flags & XVID_CPU_3DNOWEXT) ||
318                    (cpu_flags & XVID_CPU_SSE) || (cpu_flags & XVID_CPU_SSE2))
319            {
320                    /* Restore FPU context : emms_c is a nop function */
321                    emms = emms_mmx;
322            }
323    
324            if ((cpu_flags & XVID_CPU_MMX)) {
325    
326                  /* Forward and Inverse Discrete Cosine Transformation functions */                  /* Forward and Inverse Discrete Cosine Transformation functions */
327                  fdct = fdct_mmx;                  fdct = fdct_mmx_skal;
328                  idct = idct_mmx;                  idct = idct_mmx;
329    
330                  /* To restore FPU context after mmx use */                  /* Qpel stuff */
331                  emms = emms_mmx;                  xvid_QP_Funcs = &xvid_QP_Funcs_mmx;
332                    xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_mmx;
333    
334                  /* Quantization related functions */                  /* Quantization related functions */
335                  quant_intra   = quant_intra_mmx;                  quant_h263_intra   = quant_h263_intra_mmx;
336                  dequant_intra = dequant_intra_mmx;                  quant_h263_inter   = quant_h263_inter_mmx;
337                  quant_inter   = quant_inter_mmx;                  dequant_h263_intra = dequant_h263_intra_mmx;
338                  dequant_inter = dequant_inter_mmx;                  dequant_h263_inter = dequant_h263_inter_mmx;
339    
340                  quant4_intra   = quant4_intra_mmx;                  quant_mpeg_intra   = quant_mpeg_intra_mmx;
341                  dequant4_intra = dequant4_intra_mmx;                  quant_mpeg_inter   = quant_mpeg_inter_mmx;
342                  quant4_inter   = quant4_inter_mmx;                  dequant_mpeg_intra = dequant_mpeg_intra_mmx;
343                  dequant4_inter = dequant4_inter_mmx;                  dequant_mpeg_inter = dequant_mpeg_inter_mmx;
344    
345                  /* Block related functions */                  /* Block related functions */
346                  transfer_8to16copy = transfer_8to16copy_mmx;                  transfer_8to16copy = transfer_8to16copy_mmx;
347                  transfer_16to8copy = transfer_16to8copy_mmx;                  transfer_16to8copy = transfer_16to8copy_mmx;
348                  transfer_8to16sub  = transfer_8to16sub_mmx;                  transfer_8to16sub  = transfer_8to16sub_mmx;
349                    transfer_8to16subro  = transfer_8to16subro_mmx;
350                  transfer_8to16sub2 = transfer_8to16sub2_mmx;                  transfer_8to16sub2 = transfer_8to16sub2_mmx;
351                  transfer_16to8add  = transfer_16to8add_mmx;                  transfer_16to8add  = transfer_16to8add_mmx;
352                  transfer8x8_copy   = transfer8x8_copy_mmx;                  transfer8x8_copy   = transfer8x8_copy_mmx;
353                    transfer8x4_copy   = transfer8x4_copy_mmx;
354    
355                    /* Interlacing Functions */
356                    MBFieldTest = MBFieldTest_mmx;
357    
358                  /* Image Interpolation related functions */                  /* Image Interpolation related functions */
359                  interpolate8x8_halfpel_h  = interpolate8x8_halfpel_h_mmx;                  interpolate8x8_halfpel_h  = interpolate8x8_halfpel_h_mmx;
360                  interpolate8x8_halfpel_v  = interpolate8x8_halfpel_v_mmx;                  interpolate8x8_halfpel_v  = interpolate8x8_halfpel_v_mmx;
361                  interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_mmx;                  interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_mmx;
362    
363                  /* Image RGB->YV12 related functions */                  interpolate8x4_halfpel_h  = interpolate8x4_halfpel_h_mmx;
364                  rgb24_to_yv12 = rgb24_to_yv12_mmx;                  interpolate8x4_halfpel_v  = interpolate8x4_halfpel_v_mmx;
365                  rgb32_to_yv12 = rgb32_to_yv12_mmx;                  interpolate8x4_halfpel_hv = interpolate8x4_halfpel_hv_mmx;
366                  yuv_to_yv12   = yuv_to_yv12_mmx;  
367                    interpolate8x8_halfpel_add = interpolate8x8_halfpel_add_mmx;
368                    interpolate8x8_halfpel_h_add = interpolate8x8_halfpel_h_add_mmx;
369                    interpolate8x8_halfpel_v_add = interpolate8x8_halfpel_v_add_mmx;
370                    interpolate8x8_halfpel_hv_add = interpolate8x8_halfpel_hv_add_mmx;
371    
372                    interpolate8x8_6tap_lowpass_h = interpolate8x8_6tap_lowpass_h_mmx;
373                    interpolate8x8_6tap_lowpass_v = interpolate8x8_6tap_lowpass_v_mmx;
374    
375                    interpolate8x8_avg2 = interpolate8x8_avg2_mmx;
376                    interpolate8x8_avg4 = interpolate8x8_avg4_mmx;
377    
378                    /* postprocessing */
379                    image_brightness = image_brightness_mmx;
380    
381                    /* image input xxx_to_yv12 related functions */
382                    yv12_to_yv12  = yv12_to_yv12_mmx;
383                    bgr_to_yv12   = bgr_to_yv12_mmx;
384                    rgb_to_yv12   = rgb_to_yv12_mmx;
385                    bgra_to_yv12  = bgra_to_yv12_mmx;
386                    rgba_to_yv12  = rgba_to_yv12_mmx;
387                  yuyv_to_yv12  = yuyv_to_yv12_mmx;                  yuyv_to_yv12  = yuyv_to_yv12_mmx;
388                  uyvy_to_yv12  = uyvy_to_yv12_mmx;                  uyvy_to_yv12  = uyvy_to_yv12_mmx;
389    
390                  /* Image YV12->RGB related functions */                  /* image output yv12_to_xxx related functions */
391                  yv12_to_rgb24 = yv12_to_rgb24_mmx;                  yv12_to_bgr   = yv12_to_bgr_mmx;
392                  yv12_to_rgb32 = yv12_to_rgb32_mmx;                  yv12_to_bgra  = yv12_to_bgra_mmx;
393                  yv12_to_yuyv  = yv12_to_yuyv_mmx;                  yv12_to_yuyv  = yv12_to_yuyv_mmx;
394                  yv12_to_uyvy  = yv12_to_uyvy_mmx;                  yv12_to_uyvy  = yv12_to_uyvy_mmx;
395    
396                    yv12_to_yuyvi = yv12_to_yuyvi_mmx;
397                    yv12_to_uyvyi = yv12_to_uyvyi_mmx;
398    
399                  /* Motion estimation related functions */                  /* Motion estimation related functions */
400                  calc_cbp = calc_cbp_mmx;                  calc_cbp = calc_cbp_mmx;
401                  sad16    = sad16_mmx;                  sad16    = sad16_mmx;
402                  sad8     = sad8_mmx;                  sad8     = sad8_mmx;
403                    sad16bi    = sad16bi_mmx;
404                    sad8bi     = sad8bi_mmx;
405                  dev16    = dev16_mmx;                  dev16    = dev16_mmx;
406                    sad16v     = sad16v_mmx;
407                    sse8_16bit = sse8_16bit_mmx;
408                    sse8_8bit  = sse8_8bit_mmx;
409            }
410    
411            /* these 3dnow functions are faster than mmx, but slower than xmm. */
412            if ((cpu_flags & XVID_CPU_3DNOW)) {
413    
414                    emms = emms_3dn;
415    
416                    /* ME functions */
417                    sad16bi = sad16bi_3dn;
418                    sad8bi  = sad8bi_3dn;
419    
420                    yuyv_to_yv12  = yuyv_to_yv12_3dn;
421                    uyvy_to_yv12  = uyvy_to_yv12_3dn;
422          }          }
423    
         if ((cpu_flags & XVID_CPU_MMXEXT) > 0) {  
424    
425                  /* Inverse DCT */          if ((cpu_flags & XVID_CPU_MMXEXT)) {
426    
427                    /* DCT */
428                    fdct = fdct_xmm_skal;
429                  idct = idct_xmm;                  idct = idct_xmm;
430    
431                  /* Interpolation */                  /* Interpolation */
# Line 316  Line 433 
433                  interpolate8x8_halfpel_v  = interpolate8x8_halfpel_v_xmm;                  interpolate8x8_halfpel_v  = interpolate8x8_halfpel_v_xmm;
434                  interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_xmm;                  interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_xmm;
435    
436                    interpolate8x4_halfpel_h  = interpolate8x4_halfpel_h_xmm;
437                    interpolate8x4_halfpel_v  = interpolate8x4_halfpel_v_xmm;
438                    interpolate8x4_halfpel_hv = interpolate8x4_halfpel_hv_xmm;
439    
440                    interpolate8x8_halfpel_add = interpolate8x8_halfpel_add_xmm;
441                    interpolate8x8_halfpel_h_add = interpolate8x8_halfpel_h_add_xmm;
442                    interpolate8x8_halfpel_v_add = interpolate8x8_halfpel_v_add_xmm;
443                    interpolate8x8_halfpel_hv_add = interpolate8x8_halfpel_hv_add_xmm;
444    
445                  /* Quantization */                  /* Quantization */
446                  dequant_intra = dequant_intra_xmm;                  quant_mpeg_inter = quant_mpeg_inter_xmm;
447                  dequant_inter = dequant_inter_xmm;  
448                    dequant_h263_intra = dequant_h263_intra_xmm;
449                    dequant_h263_inter = dequant_h263_inter_xmm;
450    
451                  /* Buffer transfer */                  /* Buffer transfer */
452                  transfer_8to16sub2 = transfer_8to16sub2_xmm;                  transfer_8to16sub2 = transfer_8to16sub2_xmm;
453                    transfer_8to16sub2ro = transfer_8to16sub2ro_xmm;
454    
455                  /* Colorspace transformation */                  /* Colorspace transformation */
456                  yuv_to_yv12 = yuv_to_yv12_xmm;                  yv12_to_yv12  = yv12_to_yv12_xmm;
457                    yuyv_to_yv12  = yuyv_to_yv12_xmm;
458                    uyvy_to_yv12  = uyvy_to_yv12_xmm;
459    
460                  /* ME functions */                  /* ME functions */
461                  sad16 = sad16_xmm;                  sad16 = sad16_xmm;
                 sad16bi = sad16bi_xmm;  
462                  sad8  = sad8_xmm;                  sad8  = sad8_xmm;
463                    sad16bi = sad16bi_xmm;
464                    sad8bi  = sad8bi_xmm;
465                  dev16 = dev16_xmm;                  dev16 = dev16_xmm;
466                    sad16v   = sad16v_xmm;
467          }          }
468    
469          if ((cpu_flags & XVID_CPU_3DNOW) > 0) {          if ((cpu_flags & XVID_CPU_3DNOW)) {
470    
471                  /* Interpolation */                  /* Interpolation */
472                  interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_3dn;                  interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_3dn;
473                  interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_3dn;                  interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_3dn;
474                  interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_3dn;                  interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_3dn;
475    
476                    interpolate8x4_halfpel_h = interpolate8x4_halfpel_h_3dn;
477                    interpolate8x4_halfpel_v = interpolate8x4_halfpel_v_3dn;
478                    interpolate8x4_halfpel_hv = interpolate8x4_halfpel_hv_3dn;
479          }          }
480    
481          if ((cpu_flags & XVID_CPU_SSE2) > 0) {          if ((cpu_flags & XVID_CPU_3DNOWEXT)) {
482  #ifdef EXPERIMENTAL_SSE2_CODE  
483                    /* Buffer transfer */
484                    transfer_8to16copy =  transfer_8to16copy_3dne;
485                    transfer_16to8copy = transfer_16to8copy_3dne;
486                    transfer_8to16sub =  transfer_8to16sub_3dne;
487                    transfer_8to16subro =  transfer_8to16subro_3dne;
488                    transfer_16to8add = transfer_16to8add_3dne;
489                    transfer8x8_copy = transfer8x8_copy_3dne;
490                    transfer8x4_copy = transfer8x4_copy_3dne;
491    
492                    if ((cpu_flags & XVID_CPU_MMXEXT)) {
493                            /* Inverse DCT */
494                            idct =  idct_3dne;
495    
496                            /* Buffer transfer */
497                            transfer_8to16sub2 =  transfer_8to16sub2_3dne;
498    
499                            /* Interpolation */
500                            interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_3dne;
501                            interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_3dne;
502                            interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_3dne;
503    
504                            interpolate8x4_halfpel_h = interpolate8x4_halfpel_h_3dne;
505                            interpolate8x4_halfpel_v = interpolate8x4_halfpel_v_3dne;
506                            interpolate8x4_halfpel_hv = interpolate8x4_halfpel_hv_3dne;
507    
508                            /* Quantization */
509                            quant_h263_intra = quant_h263_intra_3dne;               /* cmov only */
510                            quant_h263_inter = quant_h263_inter_3dne;
511                            dequant_mpeg_intra = dequant_mpeg_intra_3dne;   /* cmov only */
512                            dequant_mpeg_inter = dequant_mpeg_inter_3dne;
513                            dequant_h263_intra = dequant_h263_intra_3dne;
514                            dequant_h263_inter = dequant_h263_inter_3dne;
515    
516                            /* ME functions */
517                            calc_cbp = calc_cbp_3dne;
518    
519                            sad16 = sad16_3dne;
520                            sad8 = sad8_3dne;
521                            sad16bi = sad16bi_3dne;
522                            sad8bi = sad8bi_3dne;
523                            dev16 = dev16_3dne;
524                    }
525            }
526    
527            if ((cpu_flags & XVID_CPU_SSE2)) {
528    
529                  calc_cbp = calc_cbp_sse2;                  calc_cbp = calc_cbp_sse2;
530    
531                  /* Quantization */                  /* Quantization */
532                  quant_intra   = quant_intra_sse2;                  quant_h263_intra   = quant_h263_intra_sse2;
533                  dequant_intra = dequant_intra_sse2;                  quant_h263_inter   = quant_h263_inter_sse2;
534                  quant_inter   = quant_inter_sse2;                  dequant_h263_intra = dequant_h263_intra_sse2;
535                  dequant_inter = dequant_inter_sse2;                  dequant_h263_inter = dequant_h263_inter_sse2;
536    
537                  /* ME */                  /* SAD operators */
538                  sad16    = sad16_sse2;                  sad16    = sad16_sse2;
539                  dev16    = dev16_sse2;                  dev16    = dev16_sse2;
540    
541                  /* Forward and Inverse DCT */                  /* DCT operators */
542                  idct  = idct_sse2;                  fdct = fdct_sse2_skal;
543                  fdct = fdct_sse2;      /* idct = idct_sse2_skal; */   /* Is now IEEE1180 and Walken compliant. Disabled until fully tested. */
544  #endif  
545                    /* postprocessing */
546                    image_brightness = image_brightness_sse2;
547          }          }
548    
549    #if 0 // TODO: test...
550            if ((cpu_flags & XVID_CPU_SSE3)) {
551    
552                    /* SAD operators */
553                    sad16    = sad16_sse3;
554                    dev16    = dev16_sse3;
555            }
556  #endif  #endif
557    #endif /* ARCH_IS_IA32 */
558    
559  #ifdef ARCH_IA64  #if defined(ARCH_IS_IA64)
560          if ((cpu_flags & XVID_CPU_IA64) > 0) { //use assembler routines?          if ((cpu_flags & XVID_CPU_ASM)) { /* use assembler routines? */
561            idct_ia64_init();            idct_ia64_init();
562            fdct = fdct_ia64;            fdct = fdct_ia64;
563            idct = idct_ia64;   //not yet working, crashes            idct = idct_ia64;   /*not yet working, crashes */
564            interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_ia64;            interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_ia64;
565            interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_ia64;            interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_ia64;
566            interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_ia64;            interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_ia64;
# Line 377  Line 568 
568            sad16bi = sad16bi_ia64;            sad16bi = sad16bi_ia64;
569            sad8 = sad8_ia64;            sad8 = sad8_ia64;
570            dev16 = dev16_ia64;            dev16 = dev16_ia64;
571            Halfpel8_Refine = Halfpel8_Refine_ia64;  /*        Halfpel8_Refine = Halfpel8_Refine_ia64; */
572            quant_intra = quant_intra_ia64;            quant_h263_intra = quant_h263_intra_ia64;
573            dequant_intra = dequant_intra_ia64;            quant_h263_inter = quant_h263_inter_ia64;
574            quant_inter = quant_inter_ia64;            dequant_h263_intra = dequant_h263_intra_ia64;
575            dequant_inter = dequant_inter_ia64;            dequant_h263_inter = dequant_h263_inter_ia64;
576            transfer_8to16copy = transfer_8to16copy_ia64;            transfer_8to16copy = transfer_8to16copy_ia64;
577            transfer_16to8copy = transfer_16to8copy_ia64;            transfer_16to8copy = transfer_16to8copy_ia64;
578            transfer_8to16sub = transfer_8to16sub_ia64;            transfer_8to16sub = transfer_8to16sub_ia64;
579            transfer_8to16sub2 = transfer_8to16sub2_ia64;            transfer_8to16sub2 = transfer_8to16sub2_ia64;
580            transfer_16to8add = transfer_16to8add_ia64;            transfer_16to8add = transfer_16to8add_ia64;
581            transfer8x8_copy = transfer8x8_copy_ia64;            transfer8x8_copy = transfer8x8_copy_ia64;
           DEBUG("Using IA-64 assembler routines.\n");  
582          }          }
583  #endif  #endif
584    
585  #ifdef ARCH_PPC  #if defined(ARCH_IS_PPC)
586  #ifdef ARCH_PPC_ALTIVEC          if ((cpu_flags & XVID_CPU_ALTIVEC)) {
587          calc_cbp = calc_cbp_altivec;            /* sad operators */
588          fdct = fdct_altivec;                    sad16 = sad16_altivec_c;
589          idct = idct_altivec;                    sad16bi = sad16bi_altivec_c;
590          sadInit = sadInit_altivec;                    sad8 = sad8_altivec_c;
591          sad16 = sad16_altivec;                    dev16 = dev16_altivec_c;
592          sad8 = sad8_altivec;  
593          dev16 = dev16_altivec;            sse8_16bit = sse8_16bit_altivec_c;
594  #else  
595          calc_cbp = calc_cbp_ppc;            /* mem transfer */
596              transfer_8to16copy = transfer_8to16copy_altivec_c;
597              transfer_16to8copy = transfer_16to8copy_altivec_c;
598              transfer_8to16sub = transfer_8to16sub_altivec_c;
599              transfer_8to16subro = transfer_8to16subro_altivec_c;
600              transfer_8to16sub2 = transfer_8to16sub2_altivec_c;
601              transfer_16to8add = transfer_16to8add_altivec_c;
602              transfer8x8_copy = transfer8x8_copy_altivec_c;
603    
604              /* Inverse DCT */
605              idct = idct_altivec_c;
606    
607              /* Interpolation */
608              interpolate8x8_halfpel_h = interpolate8x8_halfpel_h_altivec_c;
609              interpolate8x8_halfpel_v = interpolate8x8_halfpel_v_altivec_c;
610              interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_altivec_c;
611    
612              interpolate8x8_avg2 = interpolate8x8_avg2_altivec_c;
613              interpolate8x8_avg4 = interpolate8x8_avg4_altivec_c;
614    
615                      interpolate8x8_halfpel_add = interpolate8x8_halfpel_add_altivec_c;
616                      interpolate8x8_halfpel_h_add = interpolate8x8_halfpel_h_add_altivec_c;
617                      interpolate8x8_halfpel_v_add = interpolate8x8_halfpel_v_add_altivec_c;
618                      interpolate8x8_halfpel_hv_add = interpolate8x8_halfpel_hv_add_altivec_c;
619    
620              /* Colorspace conversion */
621              bgra_to_yv12 = bgra_to_yv12_altivec_c;
622              abgr_to_yv12 = abgr_to_yv12_altivec_c;
623              rgba_to_yv12 = rgba_to_yv12_altivec_c;
624              argb_to_yv12 = argb_to_yv12_altivec_c;
625    
626              yuyv_to_yv12 = yuyv_to_yv12_altivec_c;
627              uyvy_to_yv12 = uyvy_to_yv12_altivec_c;
628    
629              yv12_to_yuyv = yv12_to_yuyv_altivec_c;
630              yv12_to_uyvy = yv12_to_uyvy_altivec_c;
631    
632              /* Quantization */
633              quant_h263_intra = quant_h263_intra_altivec_c;
634              quant_h263_inter = quant_h263_inter_altivec_c;
635              dequant_h263_intra = dequant_h263_intra_altivec_c;
636              dequant_h263_inter = dequant_h263_inter_altivec_c;
637    
638                      dequant_mpeg_intra = dequant_mpeg_intra_altivec_c;
639                      dequant_mpeg_inter = dequant_mpeg_inter_altivec_c;
640    
641                      /* Qpel stuff */
642                      xvid_QP_Funcs = &xvid_QP_Funcs_Altivec_C;
643                      xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_Altivec_C;
644            }
645  #endif  #endif
646    
647    #if defined(ARCH_IS_X86_64)
648            /* For now, only XVID_CPU_ASM is checked, so the user can still
649             * disable asm usage the usual way. Once Intel EM64T CPUs are out,
650             * we may have to check more precisely which CPU features are
651             * really there. */
652            if (cpu_flags & XVID_CPU_ASM) {
653                    /* SIMD state flusher */
654                    emms = emms_mmx;
655    
656                    /* DCT operators */
657                    fdct = fdct_skal_x86_64;
658                    idct = idct_x86_64;
659    
660                    /* SAD operators */
661                    sad16      = sad16_x86_64;
662                    sad8       = sad8_x86_64;
663                    sad16bi    = sad16bi_x86_64;
664                    sad8bi     = sad8bi_x86_64;
665                    dev16      = dev16_x86_64;
666                    sad16v     = sad16v_x86_64;
667                    sse8_16bit = sse8_16bit_x86_64;
668                    sse8_8bit  = sse8_8bit_x86_64;
669    
670                    /* Interpolation operators */
671                    interpolate8x8_halfpel_h  = interpolate8x8_halfpel_h_x86_64;
672                    interpolate8x8_halfpel_v  = interpolate8x8_halfpel_v_x86_64;
673                    interpolate8x8_halfpel_hv = interpolate8x8_halfpel_hv_x86_64;
674    
675                    interpolate8x8_halfpel_add = interpolate8x8_halfpel_add_x86_64;
676                    interpolate8x8_halfpel_h_add = interpolate8x8_halfpel_h_add_x86_64;
677                    interpolate8x8_halfpel_v_add = interpolate8x8_halfpel_v_add_x86_64;
678                    interpolate8x8_halfpel_hv_add = interpolate8x8_halfpel_hv_add_x86_64;
679    
680                    interpolate8x8_6tap_lowpass_h = interpolate8x8_6tap_lowpass_h_x86_64;
681                    interpolate8x8_6tap_lowpass_v = interpolate8x8_6tap_lowpass_v_x86_64;
682    
683                    interpolate8x8_avg2 = interpolate8x8_avg2_x86_64;
684                    interpolate8x8_avg4 = interpolate8x8_avg4_x86_64;
685    
686                    /* Quantization related functions */
687                    quant_h263_intra   = quant_h263_intra_x86_64;
688                    quant_h263_inter   = quant_h263_inter_x86_64;
689                    dequant_h263_intra = dequant_h263_intra_x86_64;
690                    dequant_h263_inter = dequant_h263_inter_x86_64;
691                    /*quant_mpeg_intra   = quant_mpeg_intra_x86_64; fix me! */
692                    quant_mpeg_inter   = quant_mpeg_inter_x86_64;
693                    dequant_mpeg_intra   = dequant_mpeg_intra_x86_64;
694                    dequant_mpeg_inter   = dequant_mpeg_inter_x86_64;
695    
696                    /* Block related functions */
697                    transfer_8to16copy  = transfer_8to16copy_x86_64;
698                    transfer_16to8copy  = transfer_16to8copy_x86_64;
699                    transfer_8to16sub   = transfer_8to16sub_x86_64;
700                    transfer_8to16subro = transfer_8to16subro_x86_64;
701                    transfer_8to16sub2  = transfer_8to16sub2_x86_64;
702                    transfer_8to16sub2ro= transfer_8to16sub2ro_x86_64;
703                    transfer_16to8add   = transfer_16to8add_x86_64;
704                    transfer8x8_copy    = transfer8x8_copy_x86_64;
705    
706                    /* Qpel stuff */
707                    xvid_QP_Funcs = &xvid_QP_Funcs_x86_64;
708                    xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_x86_64;
709    
710                    /* Interlacing Functions */
711                    MBFieldTest = MBFieldTest_x86_64;
712            }
713  #endif  #endif
714    
715          return XVID_ERR_OK;  #if defined(_DEBUG)
716        xvid_debug = init->debug;
717    #endif
718    
719        return(0);
720    }
721    
722    
723    static int
724    xvid_gbl_info(xvid_gbl_info_t * info)
725    {
726            if (XVID_VERSION_MAJOR(info->version) != 1) /* v1.x.x */
727                    return XVID_ERR_VERSION;
728    
729            info->actual_version = XVID_VERSION;
730            info->build = "xvid-1.2.0-dev";
731            info->cpu_flags = detect_cpu_flags();
732      info->num_threads = 0;
733    
734    #if defined(WIN32)
735      {
736        DWORD dwProcessAffinityMask, dwSystemAffinityMask;
737        if (GetProcessAffinityMask(GetCurrentProcess(), &dwProcessAffinityMask, &dwSystemAffinityMask)) {
738          int i;
739          for(i=0; i<32; i++) {
740            if ((dwProcessAffinityMask & (1<<i)))
741              info->num_threads++;
742          }
743        }
744      }
745    #endif
746    
747            return 0;
748    }
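    /* Editor's sketch (not part of the original file): querying the structure
     * filled in above through the public entry point. The caller only sets the
     * version field; build string, cpu flags and thread count are outputs.
     * Field names follow the public 1.x xvid.h and are illustrative. */
    static void example_print_info(void)
    {
            xvid_gbl_info_t info;

            memset(&info, 0, sizeof(info));
            info.version = XVID_VERSION;

            if (xvid_global(NULL, XVID_GBL_INFO, &info, NULL) == 0)
                    printf("build=%s cpu_flags=0x%08x threads=%d\n",
                           info.build, info.cpu_flags, info.num_threads);
    }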
749    
750    
751    static int
752    xvid_gbl_convert(xvid_gbl_convert_t* convert)
753    {
754            int width;
755            int height;
756            int width2;
757            int height2;
758            IMAGE img;
759    
760            if (XVID_VERSION_MAJOR(convert->version) != 1)   /* v1.x.x */
761                  return XVID_ERR_VERSION;
762    
763    #if 0
764            const int flip1 = (convert->input.colorspace & XVID_CSP_VFLIP) ^ (convert->output.colorspace & XVID_CSP_VFLIP);
765    #endif
766            width = convert->width;
767            height = convert->height;
768            width2 = convert->width/2;
769            height2 = convert->height/2;
770    
771            switch (convert->input.csp & ~XVID_CSP_VFLIP)
772            {
773                    case XVID_CSP_YV12 :
774                            img.y = convert->input.plane[0];
775                            img.v = (uint8_t*)convert->input.plane[0] + convert->input.stride[0]*height;
776                            img.u = (uint8_t*)convert->input.plane[0] + convert->input.stride[0]*height + (convert->input.stride[0]/2)*height2;
777                            image_output(&img, width, height, width,
778                                                    (uint8_t**)convert->output.plane, convert->output.stride,
779                                                    convert->output.csp, convert->interlacing);
780                            break;
781    
782                    default :
783                            return XVID_ERR_FORMAT;
784            }
785    
786    
787            emms();
788            return 0;
789    }
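    /* Editor's sketch (not part of the original file): driving the conversion
     * helper above via XVID_GBL_CONVERT. Only packed YV12 input is handled by
     * this revision, as the switch above shows; field names are taken from the
     * structure accesses above, buffer allocation is the caller's job and
     * XVID_CSP_BGRA is just one possible output colorspace. */
    static int example_yv12_to_bgra(void *yv12, int y_stride,
                                    void *bgra, int bgra_stride,
                                    int width, int height)
    {
            xvid_gbl_convert_t cvt;

            memset(&cvt, 0, sizeof(cvt));
            cvt.version = XVID_VERSION;
            cvt.width   = width;
            cvt.height  = height;
            cvt.interlacing = 0;

            cvt.input.csp       = XVID_CSP_YV12;   /* single contiguous YV12 buffer */
            cvt.input.plane[0]  = yv12;
            cvt.input.stride[0] = y_stride;

            cvt.output.csp       = XVID_CSP_BGRA;
            cvt.output.plane[0]  = bgra;
            cvt.output.stride[0] = bgra_stride;

            return xvid_global(NULL, XVID_GBL_CONVERT, &cvt, NULL);
    }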
790    
791    /*****************************************************************************
792     * XviD Global Entry point
793     *
 794     * This function initializes all internal function pointers according
 795     * to the CPU features forced by the library client or autodetected
 796     * (depending on the XVID_CPU_FORCE flag). It also initializes the VLC
 797     * coding tables and all image colorspace transformation tables.
798     *
799     ****************************************************************************/
800    
801    
802    int
803    xvid_global(void *handle,
804                      int opt,
805                      void *param1,
806                      void *param2)
807    {
808            switch(opt)
809            {
810                    case XVID_GBL_INIT :
811                            return xvid_gbl_init((xvid_gbl_init_t*)param1);
812    
813            case XVID_GBL_INFO :
814                return xvid_gbl_info((xvid_gbl_info_t*)param1);
815    
816                    case XVID_GBL_CONVERT :
817                            return xvid_gbl_convert((xvid_gbl_convert_t*)param1);
818    
819                    default :
820                            return XVID_ERR_FAIL;
821            }
822  }  }
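    /* Editor's sketch (not part of the original file): the minimal init call a
     * library client performs before any encoding or decoding. Field names
     * follow the public 1.x xvid.h; error handling is reduced to the return
     * value. */
    static int example_global_init(void)
    {
            xvid_gbl_init_t init;

            memset(&init, 0, sizeof(init));
            init.version   = XVID_VERSION;  /* lets the lib reject API mismatches */
            init.cpu_flags = 0;             /* 0 = autodetect; OR in XVID_CPU_FORCE|... to override */

            return xvid_global(NULL, XVID_GBL_INIT, &init, NULL);
    }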
823    
824  /*****************************************************************************  /*****************************************************************************
# Line 426  Line 838 
838                          void *param2)                          void *param2)
839  {  {
840          switch (opt) {          switch (opt) {
         case XVID_DEC_DECODE:  
                 return decoder_decode((DECODER *) handle, (XVID_DEC_FRAME *) param1);  
   
841          case XVID_DEC_CREATE:          case XVID_DEC_CREATE:
842                  return decoder_create((XVID_DEC_PARAM *) param1);                  return decoder_create((xvid_dec_create_t *) param1);
843    
844          case XVID_DEC_DESTROY:          case XVID_DEC_DESTROY:
845                  return decoder_destroy((DECODER *) handle);                  return decoder_destroy((DECODER *) handle);
846    
847            case XVID_DEC_DECODE:
848                    return decoder_decode((DECODER *) handle, (xvid_dec_frame_t *) param1, (xvid_dec_stats_t*) param2);
849    
850          default:          default:
851                  return XVID_ERR_FAIL;                  return XVID_ERR_FAIL;
852          }          }
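    /* Editor's sketch (not part of the original file): the create step a client
     * performs against this dispatcher before feeding frames to
     * XVID_DEC_DECODE. Field names follow the public 1.x xvid.h and are
     * illustrative; error checks are kept minimal. */
    static void * example_decoder_create(int width, int height)
    {
            xvid_dec_create_t create;

            memset(&create, 0, sizeof(create));
            create.version = XVID_VERSION;
            create.width   = width;   /* may be 0: taken from the bitstream */
            create.height  = height;

            if (xvid_decore(NULL, XVID_DEC_CREATE, &create, NULL) < 0)
                    return NULL;

            return create.handle;     /* pass back to XVID_DEC_DECODE / DESTROY */
    }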
# Line 459  Line 871 
871  {  {
872          switch (opt) {          switch (opt) {
873          case XVID_ENC_ENCODE:          case XVID_ENC_ENCODE:
874  #ifdef BFRAMES  
875                  if (((Encoder *) handle)->mbParam.max_bframes >= 0)                  return enc_encode((Encoder *) handle,
876                  return encoder_encode_bframes((Encoder *) handle, (XVID_ENC_FRAME *) param1,                                                            (xvid_enc_frame_t *) param1,
877                                                            (XVID_ENC_STATS *) param2);                                                            (xvid_enc_stats_t *) param2);
                 else  
 #endif  
                 return encoder_encode((Encoder *) handle, (XVID_ENC_FRAME *) param1,  
                                                           (XVID_ENC_STATS *) param2);  
878    
879          case XVID_ENC_CREATE:          case XVID_ENC_CREATE:
880                  return encoder_create((XVID_ENC_PARAM *) param1);                  return enc_create((xvid_enc_create_t *) param1);
881    
882          case XVID_ENC_DESTROY:          case XVID_ENC_DESTROY:
883                  return encoder_destroy((Encoder *) handle);                  return enc_destroy((Encoder *) handle);
884    
885          default:          default:
886                  return XVID_ERR_FAIL;                  return XVID_ERR_FAIL;
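    /* Editor's sketch (not part of the original file): the per-frame call a
     * client makes against this dispatcher once an encoder handle exists
     * (obtained via XVID_ENC_CREATE). Field names follow the public 1.x xvid.h;
     * rate control, zones and plugin setup are omitted, so read this purely as
     * an illustration of the calling convention. */
    static int example_encode_frame(void *enc_handle,
                                    void *planes[3], int strides[3],
                                    void *out_buf, int out_buf_size)
    {
            xvid_enc_frame_t frame;
            xvid_enc_stats_t stats;

            memset(&frame, 0, sizeof(frame));
            memset(&stats, 0, sizeof(stats));
            frame.version = stats.version = XVID_VERSION;

            frame.input.csp      = XVID_CSP_YV12;
            frame.input.plane[0] = planes[0];
            frame.input.plane[1] = planes[1];
            frame.input.plane[2] = planes[2];
            frame.input.stride[0] = strides[0];
            frame.input.stride[1] = strides[1];
            frame.input.stride[2] = strides[2];

            frame.type      = XVID_TYPE_AUTO;  /* let the encoder pick I/P/B */
            frame.bitstream = out_buf;
            frame.length    = out_buf_size;

            /* returns the number of bytes written, or a negative xvid error code */
            return xvid_encore(enc_handle, XVID_ENC_ENCODE, &frame, &stats);
    }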
