#define SIGN(X) (((X)>0)?1:-1)

|
static __inline void
compensate16x16_interpolate(int16_t * const dct_codes,
                uint8_t * const cur,
                const uint8_t * const ref,
                const uint8_t * const refh,
                const uint8_t * const refv,
                const uint8_t * const refhv,
                uint32_t x,
                uint32_t y,
                const int32_t dx,
                const int32_t dy,
                const uint32_t stride,
                const uint32_t quarterpel,
                const int reduced_resolution,
                const uint32_t rounding)
{
    if (reduced_resolution)
    {
        const uint8_t * reference;
        x*=2; y*=2;

        reference = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride);
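        /*
         * A reduced-resolution macroblock covers a 32x32 pixel area. Each of
         * the four 8x8 coefficient blocks below is built by down-filtering
         * one 16x16 quadrant of the current image (apparently including a
         * one-pixel border, which is what the 18x18 in the name refers to)
         * into dct_codes, then subtracting the equally down-filtered
         * prediction via filter_diff_18x18_to_8x8(), so the residual is
         * computed at reduced resolution.
         */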
|
|
|
|
        filter_18x18_to_8x8(dct_codes, cur + y*stride + x, stride);
        filter_diff_18x18_to_8x8(dct_codes, reference, stride);

        filter_18x18_to_8x8(dct_codes+64, cur + y*stride + x + 16, stride);
        filter_diff_18x18_to_8x8(dct_codes+64, reference + 16, stride);
        filter_18x18_to_8x8(dct_codes+128, cur + (y+16)*stride + x, stride);
        filter_diff_18x18_to_8x8(dct_codes+128, reference + 16*stride, stride);
|
        filter_18x18_to_8x8(dct_codes+192, cur + (y+16)*stride + x + 16, stride);
        filter_diff_18x18_to_8x8(dct_codes+192, reference + 16*stride + 16, stride);

        transfer32x32_copy(cur + y*stride + x, reference, stride);

    }else{
        if(quarterpel) {
            /* ... quarterpel path ... */
        }
        else
        {
            const uint8_t * reference = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride);
            /*
             * get_ref() selects among the four interpolated planes using
             * the half-pel bits of (dx, dy):
             *   dx even, dy even: ref   + (y + dy/2)*stride     + x + dx/2
             *   dx even, dy odd : refv  + (y + (dy-1)/2)*stride + x + dx/2
             *   dx odd,  dy even: refh  + (y + dy/2)*stride     + x + (dx-1)/2
             *   dx odd,  dy odd : refhv + (y + (dy-1)/2)*stride + x + (dx-1)/2
             */
|
            transfer_8to16sub(dct_codes, cur + y * stride + x,
                              reference, stride);
            transfer_8to16sub(dct_codes+64, cur + y * stride + x + 8,
                              reference + 8, stride);
            /* ... blocks 2 and 3 likewise ... */
        }
    }
}

static __inline void
compensate8x8_interpolate(int16_t * const dct_codes,
                uint8_t * const cur,
                const uint8_t * const ref,
                const uint8_t * const refh,
                const uint8_t * const refv,
                const uint8_t * const refhv,
                uint32_t x,
                uint32_t y,
                const int32_t dx,
                const int32_t dy,
                const uint32_t stride,
                const uint32_t quarterpel,
                const int reduced_resolution,
                const uint32_t rounding)
{
    if (reduced_resolution)
    {
        const uint8_t * reference;
        x*=2; y*=2;

        reference = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride);

        filter_18x18_to_8x8(dct_codes, cur + y*stride + x, stride);
        filter_diff_18x18_to_8x8(dct_codes, reference, stride);

        transfer16x16_copy(cur + y*stride + x, reference, stride);

    } else {
        if(quarterpel) {
            /* ... quarterpel path ... */
        }
        else
        {
            const uint8_t * reference = get_ref(ref, refh, refv, refhv, x, y, 1, dx, dy, stride);
|
|
            transfer_8to16sub(dct_codes, cur + y * stride + x,
                              reference, stride);
        }
    }
}

/* XXX: slow, inelegant... */
static void
interpolate18x18_switch(uint8_t * const cur,
                const uint8_t * const refn,
                const uint32_t x,
                const uint32_t y,
                const int32_t dx,
                const int32_t dy,
                const uint32_t stride,
                const uint32_t rounding)
{
    interpolate8x8_switch(cur, refn, x-1, y-1, dx, dy, stride, rounding);
    interpolate8x8_switch(cur, refn, x+7, y-1, dx, dy, stride, rounding);
    interpolate8x8_switch(cur, refn, x+9, y-1, dx, dy, stride, rounding);

    interpolate8x8_switch(cur, refn, x-1, y+7, dx, dy, stride, rounding);
    interpolate8x8_switch(cur, refn, x+7, y+7, dx, dy, stride, rounding);
    interpolate8x8_switch(cur, refn, x+9, y+7, dx, dy, stride, rounding);

    interpolate8x8_switch(cur, refn, x-1, y+9, dx, dy, stride, rounding);
    interpolate8x8_switch(cur, refn, x+7, y+9, dx, dy, stride, rounding);
    interpolate8x8_switch(cur, refn, x+9, y+9, dx, dy, stride, rounding);
}
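
/*
 * The nine calls above tile the 18x18 area [x-1, x+16] x [y-1, y+16] with
 * overlapping 8x8 interpolations: tile origins at -1, +7 and +9 relative to
 * x and y cover 18 pixels per axis, with the +7 and +9 tiles overlapping by
 * six pixels -- hence the "slow, inelegant" note above.
 */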

void
MBMotionCompensation(MACROBLOCK * const mb,
                const uint32_t i,
                const uint32_t j,
                const IMAGE * const ref,
                const IMAGE * const refh,
                const IMAGE * const refv,
                const IMAGE * const refhv,
                IMAGE * const cur,
                int16_t * dct_codes,
                const uint32_t edged_width,
                const uint32_t quarterpel,
                const int reduced_resolution,
                const uint32_t rounding)
{
    if (mb->mode == MODE_NOT_CODED || mb->mode == MODE_INTER || mb->mode == MODE_INTER_Q) {
        int32_t dx = (quarterpel ? mb->qmvs[0].x : mb->mvs[0].x);
        int32_t dy = (quarterpel ? mb->qmvs[0].y : mb->mvs[0].y);

        if ( (!reduced_resolution) && (mb->mode == MODE_NOT_CODED) && (dx==0) && (dy==0) ) { /* quick copy */
            transfer16x16_copy(cur->y + 16 * (i + j * edged_width),
                               ref->y + 16 * (i + j * edged_width),
                               edged_width);
            /* ... the same quick copy for the u and v planes ... */
            return;
        }
        /* quick MODE_NOT_CODED for GMC with MV!=(0,0) is still needed */

        if (reduced_resolution)
        {
            dx = RRV_MV_SCALEUP(dx);
            dy = RRV_MV_SCALEUP(dy);
        }
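        /*
         * RRV_MV_SCALEUP is presumably defined in a shared header; from the
         * semantics needed here it maps a vector component to the doubled
         * (reduced-resolution) scale, something like:
         *
         *   #define RRV_MV_SCALEUP(a) \
         *       ((a) > 0 ? 2*(a)-1 : ((a) < 0 ? 2*(a)+1 : (a)))
         *
         * The result is odd whenever the input is nonzero, so a scaled-up
         * vector always lands on a half-pel position and get_ref() selects
         * an interpolated reference plane.
         */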

        compensate16x16_interpolate(&dct_codes[0 * 64], cur->y, ref->y, refh->y,
                        refv->y, refhv->y, 16 * i, 16 * j, dx, dy,
                        edged_width, quarterpel, reduced_resolution, rounding);
        /* ... dx, dy reduced to chroma resolution ... */

        /* uv-block-based compensation */
        if (reduced_resolution)
        {
            const int stride = edged_width/2;
            uint8_t * current, * reference;

            current = cur->u + 16*j*stride + 16*i;
            reference = refv->u + 16*j*stride + 16*i;
            interpolate18x18_switch(refv->u, ref->u, 16*i, 16*j, dx, dy, stride, rounding);
            filter_18x18_to_8x8(dct_codes + 4*64, current, stride);
            filter_diff_18x18_to_8x8(dct_codes + 4*64, reference, stride);
            transfer16x16_copy(current, reference, stride);

            current = cur->v + 16*j*stride + 16*i;
            reference = refv->v + 16*j*stride + 16*i;
            interpolate18x18_switch(refv->v, ref->v, 16*i, 16*j, dx, dy, stride, rounding);
            filter_18x18_to_8x8(dct_codes + 5*64, current, stride);
            filter_diff_18x18_to_8x8(dct_codes + 5*64, reference, stride);
            transfer16x16_copy(current, reference, stride);
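
            /*
             * In reduced resolution each chroma block covers 16x16 pixels
             * instead of 8x8: interpolate18x18_switch() writes the
             * (bordered) prediction into the refv plane, the
             * filter/filter_diff pair down-filters current image and
             * prediction into one 8x8 coefficient block per plane, and
             * transfer16x16_copy() copies the prediction into the current
             * image, presumably so the decoded residual can be added on
             * top of it later.
             */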
        }else{
            transfer_8to16sub(&dct_codes[4 * 64],
                              cur->u + 8 * j * edged_width / 2 + 8 * i,
                              /* ... reference and stride arguments, then the
                                 same subtraction for the v plane ... */
        }

    } else { // mode == MODE_INTER4V
        int k;
        int32_t sum, dx, dy;
        VECTOR mvs[4];

        if(quarterpel)
            for (k = 0; k < 4; k++) mvs[k] = mb->qmvs[k];
        else
            for (k = 0; k < 4; k++) mvs[k] = mb->mvs[k];
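
        /*
         * The vectors are copied into a local array rather than aliased
         * through a VECTOR pointer, so that the reduced-resolution
         * scale-up below does not clobber mb->mvs / mb->qmvs in place.
         */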

        if (reduced_resolution)
        {
            for (k = 0; k < 4; k++)
            {
                mvs[k].x = RRV_MV_SCALEUP(mvs[k].x);
                mvs[k].y = RRV_MV_SCALEUP(mvs[k].y);
            }
        }

        compensate8x8_interpolate(&dct_codes[0 * 64], cur->y, ref->y, refh->y,
                        refv->y, refhv->y, 16 * i, 16 * j, mvs[0].x,
                        mvs[0].y, edged_width, quarterpel, reduced_resolution, rounding);
        /* ... blocks 1 and 2 at (16*i + 8, 16*j) and (16*i, 16*j + 8) ... */
        compensate8x8_interpolate(&dct_codes[3 * 64], cur->y, ref->y, refh->y,
                        refv->y, refhv->y, 16 * i + 8, 16 * j + 8,
                        mvs[3].x, mvs[3].y, edged_width, quarterpel, reduced_resolution, rounding);

        if(quarterpel)
            sum = (mvs[0].x / 2) + (mvs[1].x / 2) + (mvs[2].x / 2) + (mvs[3].x / 2);
        else
            sum = mvs[0].x + mvs[1].x + mvs[2].x + mvs[3].x;

        dx = (sum >> 3) + roundtab_76[sum & 0xf];

        if(quarterpel)
            sum = (mvs[0].y / 2) + (mvs[1].y / 2) + (mvs[2].y / 2) + (mvs[3].y / 2);
        else
            sum = mvs[0].y + mvs[1].y + mvs[2].y + mvs[3].y;

        dy = (sum >> 3) + roundtab_76[sum & 0xf];
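
        /*
         * The chroma vector is derived from the sum of the four luma
         * vectors: (sum >> 3) is half of their average (luma to chroma
         * resolution), and roundtab_76[sum & 0xf] presumably implements
         * the MPEG-4 rounding rule for averaged vectors.
         */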

        /* uv-block-based compensation */
        if (reduced_resolution)
        {
            const int stride = edged_width/2;
            uint8_t * current, * reference;

            current = cur->u + 16*j*stride + 16*i;
            reference = refv->u + 16*j*stride + 16*i;
            interpolate18x18_switch(refv->u, ref->u, 16*i, 16*j, dx, dy, stride, rounding);
            filter_18x18_to_8x8(dct_codes + 4*64, current, stride);
            filter_diff_18x18_to_8x8(dct_codes + 4*64, reference, stride);
            transfer16x16_copy(current, reference, stride);

            current = cur->v + 16*j*stride + 16*i;
            reference = refv->v + 16*j*stride + 16*i;
            interpolate18x18_switch(refv->v, ref->v, 16*i, 16*j, dx, dy, stride, rounding);
            filter_18x18_to_8x8(dct_codes + 5*64, current, stride);
            filter_diff_18x18_to_8x8(dct_codes + 5*64, reference, stride);
            transfer16x16_copy(current, reference, stride);

        }else{
            transfer_8to16sub(&dct_codes[4 * 64],
                              cur->u + 8 * j * edged_width / 2 + 8 * i,