 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * $Id: qpel.h,v 1.8 2008-11-26 01:04:34 Isibaar Exp $
 *
 ****************************************************************************/

#ifndef _XVID_QPEL_H_
#define _XVID_QPEL_H_

#include "interpolate8x8.h"
#include "../utils/mem_transfer.h"

/*****************************************************************************
 ****************************************************************************/
extern void xvid_Init_QP();

extern XVID_QP_FUNCS xvid_QP_Funcs_C_ref;     /* for P-frames */
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C_ref; /* for B-frames */

extern XVID_QP_FUNCS xvid_QP_Funcs_C;     /* for P-frames */
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C; /* for B-frames */

#if defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64)
extern XVID_QP_FUNCS xvid_QP_Funcs_mmx;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx;
#endif

#ifdef ARCH_IS_PPC
extern XVID_QP_FUNCS xvid_QP_Funcs_Altivec_C;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_Altivec_C;
#endif

extern XVID_QP_FUNCS *xvid_QP_Funcs;     /* <- main pointer for enc/dec structure */
extern XVID_QP_FUNCS *xvid_QP_Add_Funcs; /* <- main pointer for enc/dec structure */
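
/* Typical start-up wiring (illustrative sketch only; the actual CPU-detection
 * code lives in the library's global-init path, not in this header).
 * xvid_Init_QP() prepares the internal tables, then the caller points
 * xvid_QP_Funcs / xvid_QP_Add_Funcs at the best implementation, e.g.:
 *
 *   xvid_Init_QP();
 *   xvid_QP_Funcs     = &xvid_QP_Funcs_C;      // portable C fallback
 *   xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_C;
 * #if defined(ARCH_IS_IA32) || defined(ARCH_IS_X86_64)
 *   if (cpu_flags & XVID_CPU_MMX) {            // cpu_flags: caller-provided
 *       xvid_QP_Funcs     = &xvid_QP_Funcs_mmx;
 *       xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_mmx;
 *   }
 * #endif
 */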

/*****************************************************************************
 ****************************************************************************/

static void __inline
interpolate16x16_quarterpel(uint8_t * const cur,
                            uint8_t * const refn,
                            uint8_t * const refh,
                            uint8_t * const refv,
                            uint8_t * const refhv,
                            const uint32_t x, const uint32_t y,
                            const int32_t dx, const int dy,
                            const uint32_t stride,
                            const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Funcs;
    quads = (dx&3) | ((dy&3)<<2);
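
    /* quads packs both quarter-pel phases into one 4-bit selector:
     * bits 0-1 hold the horizontal phase (dx&3), bits 2-3 the vertical
     * phase (dy&3), so the dispatch below distinguishes all 16
     * horizontal/vertical sub-pel combinations (0 = full-pel in both). */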

    x_int = xRef >> 2;
    y_int = yRef >> 2;
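    /* An arithmetic right shift by 2 rounds toward negative infinity, so
     * the explicit fixups of the old division-based code
     * ("if (xRef < 0 && xRef % 4) x_int--;") are no longer needed for
     * out-of-frame (negative) reference positions. */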
|
    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */

}
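
/* Usage sketch (buffer names and scratch layout are illustrative, not
 * prescribed by this header): interpolate the 16x16 block at pixel
 * (16*mb_x, 16*mb_y) of `cur` from reference plane `ref`, with a motion
 * vector (mv_x, mv_y) given in quarter-pel units:
 *
 *   interpolate16x16_quarterpel(cur, ref,
 *                               scratch,                // refh
 *                               scratch +  64*stride,   // refv
 *                               scratch + 128*stride,   // refhv
 *                               16*mb_x, 16*mb_y,
 *                               mv_x, mv_y, stride, rounding);
 */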

static void __inline
interpolate16x16_add_quarterpel(uint8_t * const cur,
                                uint8_t * const refn,
                                uint8_t * const refh,
                                uint8_t * const refv,
                                uint8_t * const refhv,
                                const uint32_t x, const uint32_t y,
                                const int32_t dx, const int dy,
                                const uint32_t stride,
                                const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;
    const XVID_QP_FUNCS *Ops_Copy;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Add_Funcs;
    Ops_Copy = xvid_QP_Funcs;
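
    /* Two tables on purpose: the horizontal pre-pass must write plain
     * (copy) results into the scratch block, so it goes through Ops_Copy;
     * only the final pass uses the Add table, whose functions average
     * their output with what is already in dst (the other B-frame
     * prediction). */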
    quads = (dx&3) | ((dy&3)<<2);

    x_int = xRef >> 2;
    y_int = yRef >> 2;

    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */

    switch(quads) {
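    /* In the two-pass cases the horizontal filter is run over 17 rows
     * (not 16): the vertical pass averages each row with a neighbour,
     * so it consumes one extra input row. */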
    case 0:
        /* NB: there is no halfpel involved! The function's name is
         * misleading: this just averages dst with src. */
        interpolate8x8_halfpel_add(dst, src, stride, rounding);
        interpolate8x8_halfpel_add(dst+8, src+8, stride, rounding);
        interpolate8x8_halfpel_add(dst+8*stride, src+8*stride, stride, rounding);
        interpolate8x8_halfpel_add(dst+8*stride+8, src+8*stride+8, stride, rounding);
        break;
    case 1:
        Ops->H_Pass_Avrg(dst, src, 16, stride, rounding);
        break;
    case 2:
        Ops->H_Pass(dst, src, 16, stride, rounding);
        break;
    case 3:
        Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding);
        break;
    case 4:
        Ops->V_Pass_Avrg(dst, src, 16, stride, rounding);
        break;
    case 5:
        Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
        break;
    case 6:
        Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
        break;
    case 7:
        Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
        break;
    case 8:
        Ops->V_Pass(dst, src, 16, stride, rounding);
        break;
    case 9:
        Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
        Ops->V_Pass(dst, tmp, 16, stride, rounding);
        break;
    case 10:
        Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
        Ops->V_Pass(dst, tmp, 16, stride, rounding);
        break;
    case 11:
        Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
        Ops->V_Pass(dst, tmp, 16, stride, rounding);
        break;
    case 12:
        Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding);
        break;
    case 13:
        Ops_Copy->H_Pass_Avrg(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
        break;
    case 14:
        Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
        break;
    case 15:
        Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
        Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
        break;
    }
}

static void __inline
interpolate16x8_quarterpel(uint8_t * const cur,
                           uint8_t * const refn,
                           uint8_t * const refh,
                           uint8_t * const refv,
                           uint8_t * const refhv,
                           const uint32_t x, const uint32_t y,
                           const int32_t dx, const int dy,
                           const uint32_t stride,
                           const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Funcs;
    quads = (dx&3) | ((dy&3)<<2);

    x_int = xRef >> 2;
    y_int = yRef >> 2;

    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */

}

static void __inline
interpolate8x8_quarterpel(uint8_t * const cur,
                          uint8_t * const refn,
                          uint8_t * const refh,
                          uint8_t * const refv,
                          uint8_t * const refhv,
                          const uint32_t x, const uint32_t y,
                          const int32_t dx, const int dy,
                          const uint32_t stride,
                          const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Funcs;
    quads = (dx&3) | ((dy&3)<<2);

    x_int = xRef >> 2;
    y_int = yRef >> 2;

    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */

    }
}

static void __inline
interpolate8x8_add_quarterpel(uint8_t * const cur,
                              uint8_t * const refn,
                              uint8_t * const refh,
                              uint8_t * const refv,
                              uint8_t * const refhv,
                              const uint32_t x, const uint32_t y,
                              const int32_t dx, const int dy,
                              const uint32_t stride,
                              const uint32_t rounding)
{
    const uint8_t *src;
    uint8_t *dst;
    uint8_t *tmp;
    int32_t quads;
    const XVID_QP_FUNCS *Ops;
    const XVID_QP_FUNCS *Ops_Copy;

    int32_t x_int, y_int;

    const int32_t xRef = (int)x*4 + dx;
    const int32_t yRef = (int)y*4 + dy;

    Ops = xvid_QP_Add_Funcs;
    Ops_Copy = xvid_QP_Funcs;
    quads = (dx&3) | ((dy&3)<<2);

    x_int = xRef >> 2;
    y_int = yRef >> 2;

    dst = cur + y * stride + x;
    src = refn + y_int * (int)stride + x_int;

    tmp = refh; /* we need at least a 16 x stride scratch block */

    switch(quads) {
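    /* Same dispatch as the 16x16 add variant, using the 8-pixel-wide
     * "_8" passes; the horizontal pre-pass covers 9 rows so the vertical
     * pass has the one extra neighbouring row it consumes. */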
    case 0:
        /* Misleading function name, there is no halfpel involved,
         * just dst and src averaging with rounding=0 */
        interpolate8x8_halfpel_add(dst, src, stride, rounding);
        break;
    case 1:
        Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding);
        break;
    case 2:
        Ops->H_Pass_8(dst, src, 8, stride, rounding);
        break;
    case 3:
        Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
        break;
    case 4:
        Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding);
        break;
    case 5:
        Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
        break;
    case 6:
        Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
        break;
    case 7:
        Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
        break;
    case 8:
        Ops->V_Pass_8(dst, src, 8, stride, rounding);
        break;
    case 9:
        Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
        break;
    case 10:
        Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
        break;
    case 11:
        Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
        break;
    case 12:
        Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
        break;
    case 13:
        Ops_Copy->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
        break;
    case 14:
        Ops_Copy->H_Pass_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
        break;
    case 15:
        Ops_Copy->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
        Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
        break;
    }
}

#endif /* _XVID_QPEL_H_ */