        const uint32_t quant,
        const uint32_t intra_dc_threshold)
{
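    /* six 8x8 coefficient blocks per macroblock (4 luma + 2 chroma);
       the 16-byte alignment is presumably required by the SIMD
       iDCT/dequantisation routines */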
#ifdef LINUX
    DECLARE_ALIGNED_MATRIX(block, 6, 64, int16_t, 16);
    DECLARE_ALIGNED_MATRIX(data, 6, 64, int16_t, 16);
#else
    CACHE_ALIGN int16_t block[6][64];
    CACHE_ALIGN int16_t data[6][64];
#endif
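
/*
 * A minimal sketch of what the two declaration paths above might expand
 * to. Both definitions are assumptions for illustration, not the
 * project's actual headers:
 *
 *     #include <stdint.h>
 *
 *     // static alignment via a compiler attribute (GCC syntax)
 *     #define CACHE_ALIGN __attribute__ ((aligned(16)))
 *
 *     // over-allocate, then round the pointer up to the requested
 *     // boundary; note that `name` becomes a pointer, not an array
 *     #define DECLARE_ALIGNED_MATRIX(name, sizex, sizey, type, alignment) \
 *         type name##_storage[(sizex) * (sizey) + (alignment) - 1];       \
 *         type *name = (type *)(((uintptr_t)(name##_storage)              \
 *             + (alignment) - 1) & ~((uintptr_t)((alignment) - 1)))
 */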

    const uint32_t stride = dec->edged_width;
    uint32_t i;
    uint32_t iQuant = pMB->quant;

    /* ... */
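    /* chroma planes are stored at half resolution (4:2:0): each
       macroblock covers one 8x8 block in U and V, hence the 8-pixel
       offsets (<< 3) and the halved stride (stride >> 1) */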
    pU_Cur = dec->cur.u + (y_pos << 3) * (stride >> 1) + (x_pos << 3);
    pV_Cur = dec->cur.v + (y_pos << 3) * (stride >> 1) + (x_pos << 3);
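
    /* if DECLARE_ALIGNED_MATRIX yields a pointer (as in the sketch
       above), sizeof(block) is only the pointer size, which would
       explain why the LINUX path spells out the full byte count */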
#ifdef LINUX
    memset(block, 0, sizeof(int16_t) * 6 * 64);
#else
    memset(block, 0, sizeof(block));    // clear
#endif
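
    /* decode each of the six 8x8 blocks: presumably i = 0..3 are the
       luma (Y) blocks and 4, 5 are the U and V chroma blocks */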
    for (i = 0; i < 6; i++)
    {

/* ... */

        const uint32_t quant,
        const uint32_t rounding)
{
#ifdef LINUX
    DECLARE_ALIGNED_MATRIX(block, 6, 64, int16_t, 16);
    DECLARE_ALIGNED_MATRIX(data, 6, 64, int16_t, 16);
#else
    CACHE_ALIGN int16_t block[6][64];
    CACHE_ALIGN int16_t data[6][64];
#endif
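
    /* luma rows are stride samples apart; chroma is subsampled 4:2:0,
       so chroma rows are stride/2 (stride2) apart */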
    const uint32_t stride = dec->edged_width;
    const uint32_t stride2 = dec->edged_width / 2;