Parent Directory | Revision Log
Revision 1586 - (view) (download)
1 : | edgomez | 1586 | ;/***************************************************************************** |
2 : | ; * | ||
3 : | ; * XVID MPEG-4 VIDEO CODEC | ||
4 : | ; * - mmx 8x8 block-based halfpel interpolation - | ||
5 : | ; * | ||
6 : | ; * Copyright(C) 2002 Michael Militzer <isibaar@xvid.org> | ||
7 : | ; * 2002 Pascal Massimino <skal@planet-d.net> | ||
8 : | ; * 2004 Andre Werthmann <wertmann@aei.mpg.de> | ||
9 : | ; * | ||
10 : | ; * This program is free software ; you can redistribute it and/or modify | ||
11 : | ; * it under the terms of the GNU General Public License as published by | ||
12 : | ; * the Free Software Foundation ; either version 2 of the License, or | ||
13 : | ; * (at your option) any later version. | ||
14 : | ; * | ||
15 : | ; * This program is distributed in the hope that it will be useful, | ||
16 : | ; * but WITHOUT ANY WARRANTY ; without even the implied warranty of | ||
17 : | ; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 : | ; * GNU General Public License for more details. | ||
19 : | ; * | ||
20 : | ; * You should have received a copy of the GNU General Public License | ||
21 : | ; * along with this program ; if not, write to the Free Software | ||
22 : | ; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
23 : | ; * | ||
24 : | ; ****************************************************************************/ | ||
25 : | |||
26 : | BITS 64 | ||
27 : | |||
;-----------------------------------------------------------------------------
; cglobal: declare an exported function symbol.
; When PREFIX is defined the symbol gets a leading underscore (platforms with
; C-style name mangling); when MARK_FUNCS is defined the symbol is tagged as
; an ELF function whose size runs to the matching `.endfunc` label, which
; helps debuggers/profilers attribute addresses correctly.
;-----------------------------------------------------------------------------
%macro cglobal 1
	%ifdef PREFIX
		%ifdef MARK_FUNCS
			global _%1:function %1.endfunc-%1
			%define %1 _%1:function %1.endfunc-%1
		%else
			global _%1
			%define %1 _%1
		%endif
	%else
		%ifdef MARK_FUNCS
			global %1:function %1.endfunc-%1
		%else
			global %1
		%endif
	%endif
%endmacro
45 : | |||
46 : | ;============================================================================= | ||
47 : | ; Read only data | ||
48 : | ;============================================================================= | ||
49 : | |||
; COFF does not support section alignment attributes, hence the split.
%ifdef FORMAT_COFF
SECTION .rodata
%else
SECTION .rodata align=16
%endif

ALIGN 16
; Eight packed bytes of value 1: the LSB mask used to correct pavgb's
; round-up bias when rounding-down semantics are required.
mmx_one:
	times 8 db 1
59 : | |||
60 : | SECTION .text align=16 | ||
61 : | |||
62 : | cglobal interpolate8x8_halfpel_h_x86_64 | ||
63 : | cglobal interpolate8x8_halfpel_v_x86_64 | ||
64 : | cglobal interpolate8x8_halfpel_hv_x86_64 | ||
65 : | |||
66 : | cglobal interpolate8x8_halfpel_add_x86_64 | ||
67 : | cglobal interpolate8x8_halfpel_h_add_x86_64 | ||
68 : | cglobal interpolate8x8_halfpel_v_add_x86_64 | ||
69 : | cglobal interpolate8x8_halfpel_hv_add_x86_64 | ||
70 : | |||
71 : | ;=========================================================================== | ||
72 : | ; | ||
73 : | ; void interpolate8x8_halfpel_h_x86_64(uint8_t * const dst, | ||
74 : | ; const uint8_t * const src, | ||
75 : | ; const uint32_t stride, | ||
76 : | ; const uint32_t rounding); | ||
77 : | ; | ||
78 : | ;=========================================================================== | ||
79 : | |||
;-----------------------------------------------------------------------------
; Horizontal halfpel, rounding==0, two rows per invocation:
;   dst[i] = (src[i] + src[i+1] + 1) / 2   (pavgb rounds up, matching r==0)
; In:  rax = src, rcx = dst, rdx = stride.
; Out: rax advanced by 2 rows; caller advances rcx.  Clobbers mm0, mm1.
;-----------------------------------------------------------------------------
%macro COPY_H_SSE_RND0 0
	movq mm0, [rax]
	pavgb mm0, [rax+1]
	movq mm1, [rax+rdx]
	pavgb mm1, [rax+rdx+1]
	lea rax, [rax+2*rdx]
	movq [rcx], mm0
	movq [rcx+rdx], mm1
%endmacro
89 : | |||
;-----------------------------------------------------------------------------
; Horizontal halfpel, rounding==1, two rows per invocation.
; Uses (i+j)/2 = (i+j+1)/2 - ((i^j)&1): pavgb gives the round-up average,
; then the lsb of i^j (kept in mm2/mm3, masked by mm7) is subtracted.
; In:  rax = src, rcx = dst, rdx = stride, mm7 = mmx_one (byte mask 0x01).
; Out: rax advanced by 2 rows; caller advances rcx.
; Clobbers mm0-mm5.
;-----------------------------------------------------------------------------
%macro COPY_H_SSE_RND1 0
	movq mm0, [rax]
	movq mm1, [rax+rdx]
	movq mm4, mm0		; keep copies of i for the i^j correction
	movq mm5, mm1
	movq mm2, [rax+1]
	movq mm3, [rax+rdx+1]
	pavgb mm0, mm1
	pxor mm2, mm4		; mm2 = i^j (row 0)
	pavgb mm1, mm3
	lea rax, [rax+2*rdx]
	pxor mm3, mm5		; mm3 = i^j (row 1)
	pand mm2, mm7		; isolate lsb's of (i^j)...
	pand mm3, mm7
	psubb mm0, mm2		; ...and subtract them from the pavgb result
	movq [rcx], mm0
	psubb mm1, mm3
	movq [rcx+rdx], mm1
%endmacro
109 : | |||
ALIGN 16
;-----------------------------------------------------------------------------
; void interpolate8x8_halfpel_h_x86_64(uint8_t *dst, const uint8_t *src,
;                                      uint32_t stride, uint32_t rounding)
; SysV AMD64 args: rdi=dst, rsi=src, rdx=stride, rcx=rounding.
; Horizontal halfpel interpolation of an 8x8 block (8 rows, 2 per macro).
; Clobbers: rax, rcx, mm0-mm5, mm7, flags.
;-----------------------------------------------------------------------------
interpolate8x8_halfpel_h_x86_64:

	mov rax, rcx		; rounding
	mov rcx, rdi		; Dst
	test rax,rax		; ZF = (rounding == 0); plain movs below keep flags
	mov rax, rsi		; src
	; rdx is stride

	jnz near .rounding1	; rounding==1 needs the pavgb bias correction

	COPY_H_SSE_RND0
	lea rcx,[rcx+2*rdx]
	COPY_H_SSE_RND0
	lea rcx,[rcx+2*rdx]
	COPY_H_SSE_RND0
	lea rcx,[rcx+2*rdx]
	COPY_H_SSE_RND0
	ret

.rounding1
	; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
	movq mm7, [mmx_one wrt rip]	; RIP-relative: PIC-safe constant load
	COPY_H_SSE_RND1
	lea rcx, [rcx+2*rdx]
	COPY_H_SSE_RND1
	lea rcx,[rcx+2*rdx]
	COPY_H_SSE_RND1
	lea rcx,[rcx+2*rdx]
	COPY_H_SSE_RND1
	ret
.endfunc
142 : | |||
143 : | ;=========================================================================== | ||
144 : | ; | ||
145 : | ; void interpolate8x8_halfpel_v_x86_64(uint8_t * const dst, | ||
146 : | ; const uint8_t * const src, | ||
147 : | ; const uint32_t stride, | ||
148 : | ; const uint32_t rounding); | ||
149 : | ; | ||
150 : | ;=========================================================================== | ||
151 : | |||
;-----------------------------------------------------------------------------
; Vertical halfpel, rounding==0, two rows per invocation:
;   dst[i] = (src[i] + src[i+stride] + 1) / 2
; In:  rax = src, rcx = dst, rdx = stride.
; Out: rax advanced by 2 rows; caller advances rcx.  Clobbers mm0, mm1.
;-----------------------------------------------------------------------------
%macro COPY_V_SSE_RND0 0
	movq mm0, [rax]
	movq mm1, [rax+rdx]
	pavgb mm0, mm1
	pavgb mm1, [rax+2*rdx]
	lea rax, [rax+2*rdx]
	movq [rcx], mm0
	movq [rcx+rdx], mm1
%endmacro
161 : | |||
;-----------------------------------------------------------------------------
; Vertical halfpel, rounding==1, two rows per invocation.
; Uses (i+j)/2 = (i+j+1)/2 - ((i^j)&1) to undo pavgb's round-up.
; In:  rax = src (pointing one row past the invariant), rcx = dst,
;      rdx = stride, mm7 = mmx_one, mm2 = previous row (loop invariant).
; Out: mm2 = last row read (invariant for next call); rax advanced 2 rows;
;      caller advances rcx.  Clobbers mm0, mm1, mm4, mm5.
;-----------------------------------------------------------------------------
%macro COPY_V_SSE_RND1 0
	movq mm0, mm2		; row carried over from previous iteration
	movq mm1, [rax]
	movq mm2, [rax+rdx]	; becomes next iteration's invariant
	lea rax, [rax+2*rdx]
	movq mm4, mm0
	movq mm5, mm1
	pavgb mm0, mm1
	pxor mm4, mm1		; mm4 = i^j (row 0)
	pavgb mm1, mm2
	pxor mm5, mm2		; mm5 = i^j (row 1)
	pand mm4, mm7		; lsb's of (i^j)...
	pand mm5, mm7		; lsb's of (i^j)...
	psubb mm0, mm4		; ...are subtracted from result of pavgb
	movq [rcx], mm0
	psubb mm1, mm5		; ...are subtracted from result of pavgb
	movq [rcx+rdx], mm1
%endmacro
180 : | |||
ALIGN 16
;-----------------------------------------------------------------------------
; void interpolate8x8_halfpel_v_x86_64(uint8_t *dst, const uint8_t *src,
;                                      uint32_t stride, uint32_t rounding)
; SysV AMD64 args: rdi=dst, rsi=src, rdx=stride, rcx=rounding.
; Vertical halfpel interpolation of an 8x8 block.
; Clobbers: rax, rcx, mm0-mm5, mm7, flags.
;-----------------------------------------------------------------------------
interpolate8x8_halfpel_v_x86_64:
	mov rax, rcx		; rounding
	mov rcx, rdi		; Dst
	test rax,rax		; ZF = (rounding == 0); movs below keep flags
	mov rax, rsi		; Src
	; rdx is stride

	; we process 2 lines at a time
	jnz near .rounding1

	COPY_V_SSE_RND0
	lea rcx, [rcx+2*rdx]
	COPY_V_SSE_RND0
	lea rcx, [rcx+2*rdx]
	COPY_V_SSE_RND0
	lea rcx, [rcx+2*rdx]
	COPY_V_SSE_RND0
	ret

.rounding1
	; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
	movq mm7, [mmx_one wrt rip]
	movq mm2, [rax]		; loop invariant: first source row
	add rax, rdx
	COPY_V_SSE_RND1
	lea rcx,[rcx+2*rdx]
	COPY_V_SSE_RND1
	lea rcx,[rcx+2*rdx]
	COPY_V_SSE_RND1
	lea rcx,[rcx+2*rdx]
	COPY_V_SSE_RND1
	ret
.endfunc
216 : | |||
217 : | ;=========================================================================== | ||
218 : | ; | ||
219 : | ; void interpolate8x8_halfpel_hv_xmm(uint8_t * const dst, | ||
220 : | ; const uint8_t * const src, | ||
221 : | ; const uint32_t stride, | ||
222 : | ; const uint32_t rounding); | ||
223 : | ; | ||
224 : | ; | ||
225 : | ;=========================================================================== | ||
226 : | |||
227 : | ; The trick is to correct the result of 'pavgb' with some combination of the | ||
228 : | ; lsb's of the 4 input values i,j,k,l, and their intermediate 'pavgb' (s and t). | ||
229 : | ; The boolean relations are: | ||
230 : | ; (i+j+k+l+3)/4 = (s+t+1)/2 - (ij&kl)&st | ||
231 : | ; (i+j+k+l+2)/4 = (s+t+1)/2 - (ij|kl)&st | ||
232 : | ; (i+j+k+l+1)/4 = (s+t+1)/2 - (ij&kl)|st | ||
233 : | ; (i+j+k+l+0)/4 = (s+t+1)/2 - (ij|kl)|st | ||
234 : | ; with s=(i+j+1)/2, t=(k+l+1)/2, ij = i^j, kl = k^l, st = s^t. | ||
235 : | |||
; Moreover, we process two lines at a time, for better overlapping (~15% faster).
237 : | |||
;-----------------------------------------------------------------------------
; Diagonal (hv) halfpel, rounding==0, two output rows per invocation.
; Implements (i+j+k+l+2)/4 = (s+t+1)/2 - ((i^j)|(k^l))&(s^t)
; with s=(i+j+1)/2 and t=(k+l+1)/2 (see the derivation comment above).
; In:  rax = src row, rcx = dst, rdx = stride, mm7 = mmx_one,
;      mm2 = (i+j+1)/2 and mm3 = i^j of the previous row (loop invariants).
; Out: mm2/mm3 refreshed for the next call; rax advanced 2 rows, rcx 1 row
;      (caller adds the other).  Clobbers mm0, mm1, mm6.
;-----------------------------------------------------------------------------
%macro COPY_HV_SSE_RND0 0
	lea rax, [rax+rdx]

	movq mm0, [rax]
	movq mm1, [rax+1]

	movq mm6, mm0
	pavgb mm0, mm1		; mm0=(j+k+1)/2. preserved for next step
	lea rax, [rax+rdx]
	pxor mm1, mm6		; mm1=(j^k). preserved for next step

	por mm3, mm1		; ij |= jk
	movq mm6, mm2
	pxor mm6, mm0		; mm6 = s^t
	pand mm3, mm6		; (ij|jk) &= st
	pavgb mm2, mm0		; mm2 = (s+t+1)/2
	pand mm3, mm7		; mask lsb
	psubb mm2, mm3		; apply.

	movq [rcx], mm2

	movq mm2, [rax]
	movq mm3, [rax+1]
	movq mm6, mm2
	pavgb mm2, mm3		; preserved for next iteration
	lea rcx,[rcx+rdx]
	pxor mm3, mm6		; preserved for next iteration

	por mm1, mm3		; same correction, second output row
	movq mm6, mm0
	pxor mm6, mm2
	pand mm1, mm6
	pavgb mm0, mm2

	pand mm1, mm7
	psubb mm0, mm1

	movq [rcx], mm0
%endmacro
277 : | |||
;-----------------------------------------------------------------------------
; Diagonal (hv) halfpel, rounding==1, two output rows per invocation.
; Implements (i+j+k+l+1)/4 = (s+t+1)/2 - ((i^j)&(k^l))|(s^t)
; (note AND/OR are swapped relative to the rounding==0 variant).
; In/out contract identical to COPY_HV_SSE_RND0.
;-----------------------------------------------------------------------------
%macro COPY_HV_SSE_RND1 0
	lea rax, [rax+rdx]

	movq mm0, [rax]
	movq mm1, [rax+1]

	movq mm6, mm0
	pavgb mm0, mm1		; mm0=(j+k+1)/2. preserved for next step
	lea rax, [rax+rdx]
	pxor mm1, mm6		; mm1=(j^k). preserved for next step

	pand mm3, mm1		; ij &= jk
	movq mm6, mm2
	pxor mm6, mm0		; mm6 = s^t
	por mm3, mm6		; (ij&jk) |= st
	pavgb mm2, mm0		; mm2 = (s+t+1)/2
	pand mm3, mm7		; mask lsb
	psubb mm2, mm3		; apply.

	movq [rcx], mm2

	movq mm2, [rax]
	movq mm3, [rax+1]
	movq mm6, mm2
	pavgb mm2, mm3		; preserved for next iteration
	lea rcx,[rcx+rdx]
	pxor mm3, mm6		; preserved for next iteration

	pand mm1, mm3		; same correction, second output row
	movq mm6, mm0
	pxor mm6, mm2
	por mm1, mm6
	pavgb mm0, mm2
	pand mm1, mm7
	psubb mm0, mm1

	movq [rcx], mm0
%endmacro
316 : | |||
ALIGN 16
;-----------------------------------------------------------------------------
; void interpolate8x8_halfpel_hv_x86_64(uint8_t *dst, const uint8_t *src,
;                                       uint32_t stride, uint32_t rounding)
; SysV AMD64 args: rdi=dst, rsi=src, rdx=stride, rcx=rounding.
; Diagonal (h+v) halfpel interpolation of an 8x8 block.
; Clobbers: rax, rcx, mm0-mm3, mm6, mm7, flags.
;-----------------------------------------------------------------------------
interpolate8x8_halfpel_hv_x86_64:
	mov rax, rcx		; rounding
	mov rcx, rdi		; Dst
	test rax, rax		; ZF = (rounding == 0)
	mov rax, rsi		; Src
	; rdx is stride

	movq mm7, [mmx_one wrt rip]

	; loop invariants: mm2=(i+j+1)/2 and mm3= i^j
	movq mm2, [rax]
	movq mm3, [rax+1]
	movq mm6, mm2
	pavgb mm2, mm3
	pxor mm3, mm6		; mm2/mm3 ready

	; mov/lea and MMX movq/pavgb/pxor never write EFLAGS, so the ZF from
	; the `test` above is still valid here.
	jnz near .rounding1

	COPY_HV_SSE_RND0
	add rcx, rdx		; macro already advanced rcx by one row
	COPY_HV_SSE_RND0
	add rcx, rdx
	COPY_HV_SSE_RND0
	add rcx, rdx
	COPY_HV_SSE_RND0
	ret

.rounding1
	COPY_HV_SSE_RND1
	add rcx, rdx
	COPY_HV_SSE_RND1
	add rcx, rdx
	COPY_HV_SSE_RND1
	add rcx, rdx
	COPY_HV_SSE_RND1
	ret
.endfunc
355 : | |||
356 : | ;=========================================================================== | ||
357 : | ; | ||
358 : | ; The next functions combine both source halfpel interpolation step and the | ||
; averaging (with rounding) step to avoid wasting memory bandwidth computing
360 : | ; intermediate halfpel images and then averaging them. | ||
361 : | ; | ||
362 : | ;=========================================================================== | ||
363 : | |||
;-----------------------------------------------------------------------------
; Shared prologue/epilogue for the *_add functions.
; SysV AMD64 args: rdi=dst, rsi=src, rdx=stride, rcx=rounding (fourth arg).
; After PROLOG0: rcx = dst, rax = src, rdx = stride, r8 = rounding.
;-----------------------------------------------------------------------------
%macro PROLOG0 0
	; BUGFIX: this used to read `mov r8, rdx`, which saved the STRIDE
	; (third arg) while claiming to save the rounding flag; `test r8, 1`
	; in PROLOG1 then tested stride&1, so for the usual even strides the
	; rounding path was never taken.  Rounding arrives in rcx (fourth
	; arg), exactly as the non-add functions in this file read it; it
	; must be saved before rcx is overwritten with dst below.
	mov r8, rcx		; saves rounding
	mov rcx, rdi		; Dst
	mov rax, rsi		; Src
	; rdx is stride
%endmacro

%macro PROLOG1 0
	PROLOG0
	test r8, 1		; Rounding?  (ZF consumed by the caller's jnz)
%endmacro

%macro EPILOG 0
	ret
%endmacro
380 : | |||
381 : | ;=========================================================================== | ||
382 : | ; | ||
383 : | ; void interpolate8x8_halfpel_add_xmm(uint8_t * const dst, | ||
384 : | ; const uint8_t * const src, | ||
385 : | ; const uint32_t stride, | ||
386 : | ; const uint32_t rounding); | ||
387 : | ; | ||
388 : | ; | ||
389 : | ;=========================================================================== | ||
390 : | |||
;-----------------------------------------------------------------------------
; Average two full-pel source rows into dst (dst = (dst+src+1)/2), rows at
; offsets %1 and %2 from both pointers.
; In:  rax = src, rcx = dst, %1/%2 = row byte offsets.  Clobbers mm0, mm1.
;-----------------------------------------------------------------------------
%macro ADD_FF 2
	movq mm0, [rax+%1]
	movq mm1, [rax+%2]
	pavgb mm0, [rcx+%1]
	pavgb mm1, [rcx+%2]
	movq [rcx+%1], mm0
	movq [rcx+%2], mm1
%endmacro
399 : | |||
ALIGN 16
;-----------------------------------------------------------------------------
; void interpolate8x8_halfpel_add_x86_64(uint8_t *dst, const uint8_t *src,
;                                        uint32_t stride, uint32_t rounding)
; Full-pel source averaged into dst: dst = (dst + src + 1)/2 for 8 rows.
; The rounding flag is tested by PROLOG1 but both cases use pavgb here
; (the +1 bias of pavgb matches the required average of two planes).
; Clobbers: rax, rcx, r8, mm0, mm1, flags.
;-----------------------------------------------------------------------------
interpolate8x8_halfpel_add_x86_64: ; 23c
	PROLOG1
	ADD_FF 0, rdx
	lea rax,[rax+2*rdx]
	lea rcx,[rcx+2*rdx]
	ADD_FF 0, rdx
	lea rax,[rax+2*rdx]
	lea rcx,[rcx+2*rdx]
	ADD_FF 0, rdx
	lea rax,[rax+2*rdx]
	lea rcx,[rcx+2*rdx]
	ADD_FF 0, rdx
	EPILOG
.endfunc
415 : | |||
416 : | ;=========================================================================== | ||
417 : | ; | ||
418 : | ; void interpolate8x8_halfpel_h_add_xmm(uint8_t * const dst, | ||
419 : | ; const uint8_t * const src, | ||
420 : | ; const uint32_t stride, | ||
421 : | ; const uint32_t rounding); | ||
422 : | ; | ||
423 : | ; | ||
424 : | ;=========================================================================== | ||
425 : | |||
426 : | |||
;-----------------------------------------------------------------------------
; Horizontal halfpel (rounding==0) then average into dst, two rows at
; offsets %1/%2:  dst = (dst + ((s[i]+s[i+1]+1)/2) + 1)/2.
; In:  rax = src, rcx = dst.  Clobbers mm0, mm1.
;-----------------------------------------------------------------------------
%macro ADD_FH_RND0 2
	movq mm0, [rax+%1]
	movq mm1, [rax+%2]
	pavgb mm0, [rax+%1+1]
	pavgb mm1, [rax+%2+1]
	pavgb mm0, [rcx+%1]	; blend interpolated rows into dst
	pavgb mm1, [rcx+%2]
	movq [rcx+%1], mm0
	movq [rcx+%2], mm1
%endmacro
437 : | |||
;-----------------------------------------------------------------------------
; Horizontal halfpel (rounding==1) then average into dst, two rows at
; offsets %1/%2.  The (i^j)&1 correction undoes pavgb's round-up before
; the final blend with dst.
; In:  rax = src, rcx = dst.  Clobbers mm0-mm5.
;-----------------------------------------------------------------------------
%macro ADD_FH_RND1 2
	movq mm0, [rax+%1]
	movq mm1, [rax+%2]
	movq mm4, mm0		; copies for the i^j correction
	movq mm5, mm1
	movq mm2, [rax+%1+1]
	movq mm3, [rax+%2+1]
	pavgb mm0, mm2
	pxor mm2, mm4		; mm2 = i^j (row %1)
	pavgb mm1, mm3
	pxor mm3, mm5		; mm3 = i^j (row %2)
	pand mm2, [mmx_one wrt rip]	; keep lsb's only
	pand mm3, [mmx_one wrt rip]
	psubb mm0, mm2		; round-down correction
	psubb mm1, mm3
	pavgb mm0, [rcx+%1]	; blend into dst
	pavgb mm1, [rcx+%2]
	movq [rcx+%1], mm0
	movq [rcx+%2], mm1
%endmacro
459 : | |||
ALIGN 16
;-----------------------------------------------------------------------------
; void interpolate8x8_halfpel_h_add_x86_64(uint8_t *dst, const uint8_t *src,
;                                          uint32_t stride, uint32_t rounding)
; Horizontal halfpel interpolation combined with averaging into dst
; (8 rows, 2 per macro).  Clobbers: rax, rcx, r8, mm0-mm5, flags.
;-----------------------------------------------------------------------------
interpolate8x8_halfpel_h_add_x86_64: ; 32c
	PROLOG1
	jnz near .Loop1		; rounding==1 path
	ADD_FH_RND0 0, rdx
	lea rax,[rax+2*rdx]
	lea rcx,[rcx+2*rdx]
	ADD_FH_RND0 0, rdx
	lea rax,[rax+2*rdx]
	lea rcx,[rcx+2*rdx]
	ADD_FH_RND0 0, rdx
	lea rax,[rax+2*rdx]
	lea rcx,[rcx+2*rdx]
	ADD_FH_RND0 0, rdx
	EPILOG

.Loop1
	; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
	; (ADD_FH_RND1 reads mmx_one from memory directly, so mm7 is not set up)
	ADD_FH_RND1 0, rdx
	lea rax,[rax+2*rdx]
	lea rcx,[rcx+2*rdx]
	ADD_FH_RND1 0, rdx
	lea rax,[rax+2*rdx]
	lea rcx,[rcx+2*rdx]
	ADD_FH_RND1 0, rdx
	lea rax,[rax+2*rdx]
	lea rcx,[rcx+2*rdx]
	ADD_FH_RND1 0, rdx
	EPILOG
.endfunc
491 : | |||
492 : | |||
493 : | ;=========================================================================== | ||
494 : | ; | ||
495 : | ; void interpolate8x8_halfpel_v_add_x86_64(uint8_t * const dst, | ||
496 : | ; const uint8_t * const src, | ||
497 : | ; const uint32_t stride, | ||
498 : | ; const uint32_t rounding); | ||
499 : | ; | ||
500 : | ; | ||
501 : | ;=========================================================================== | ||
502 : | |||
;-----------------------------------------------------------------------------
; Vertical halfpel (rounding==0) then average into dst, two rows.
; In:  rax = src, rcx = dst, rdx = stride.
; Out: rax advanced 2 rows; caller advances rcx.  Clobbers mm0, mm1.
;-----------------------------------------------------------------------------
%macro ADD_8_HF_RND0 0
	movq mm0, [rax]
	movq mm1, [rax+rdx]
	pavgb mm0, mm1
	pavgb mm1, [rax+2*rdx]
	lea rax,[rax+2*rdx]
	pavgb mm0, [rcx]	; blend into dst
	pavgb mm1, [rcx+rdx]
	movq [rcx], mm0
	movq [rcx+rdx], mm1
%endmacro
514 : | |||
;-----------------------------------------------------------------------------
; Vertical halfpel (rounding==1) then average into dst, two rows.
; In:  rax = src, rcx = dst, rdx = stride, mm7 = mmx_one,
;      mm0 = previous source row (loop invariant, caller refreshes from mm2).
; Out: mm2 = last row read; rax advanced 2 rows; caller advances rcx.
; Clobbers mm0, mm1, mm4, mm5.
;-----------------------------------------------------------------------------
%macro ADD_8_HF_RND1 0
	movq mm1, [rax+rdx]
	movq mm2, [rax+2*rdx]	; next iteration's invariant
	lea rax,[rax+2*rdx]
	movq mm4, mm0
	movq mm5, mm1
	pavgb mm0, mm1
	pxor mm4, mm1		; mm4 = i^j (row 0)
	pavgb mm1, mm2
	pxor mm5, mm2		; mm5 = i^j (row 1)
	pand mm4, mm7		; lsb's of (i^j)...
	pand mm5, mm7		; lsb's of (i^j)...
	psubb mm0, mm4		; ...are subtracted from result of pavgb
	pavgb mm0, [rcx]	; then blend into dst
	movq [rcx], mm0
	psubb mm1, mm5		; ...are subtracted from result of pavgb
	pavgb mm1, [rcx+rdx]
	movq [rcx+rdx], mm1
%endmacro
534 : | |||
ALIGN 16
;-----------------------------------------------------------------------------
; void interpolate8x8_halfpel_v_add_x86_64(uint8_t *dst, const uint8_t *src,
;                                          uint32_t stride, uint32_t rounding)
; Vertical halfpel interpolation combined with averaging into dst.
; Clobbers: rax, rcx, r8, mm0-mm2, mm4, mm5, mm7, flags.
;-----------------------------------------------------------------------------
interpolate8x8_halfpel_v_add_x86_64:
	PROLOG1

	jnz near .Loop1
	pxor mm7, mm7		; this is a NOP (mm7 unused on this path)

	ADD_8_HF_RND0
	lea rcx,[rcx+2*rdx]
	ADD_8_HF_RND0
	lea rcx,[rcx+2*rdx]
	ADD_8_HF_RND0
	lea rcx,[rcx+2*rdx]
	ADD_8_HF_RND0
	EPILOG

.Loop1
	movq mm0, [rax]		; loop invariant: previous source row
	movq mm7, [mmx_one wrt rip]

	ADD_8_HF_RND1
	movq mm0, mm2		; carry last row into the next iteration
	lea rcx,[rcx+2*rdx]
	ADD_8_HF_RND1
	movq mm0, mm2
	lea rcx,[rcx+2*rdx]
	ADD_8_HF_RND1
	movq mm0, mm2
	lea rcx,[rcx+2*rdx]
	ADD_8_HF_RND1
	EPILOG
.endfunc
567 : | |||
568 : | ; The trick is to correct the result of 'pavgb' with some combination of the | ||
569 : | ; lsb's of the 4 input values i,j,k,l, and their intermediate 'pavgb' (s and t). | ||
570 : | ; The boolean relations are: | ||
571 : | ; (i+j+k+l+3)/4 = (s+t+1)/2 - (ij&kl)&st | ||
572 : | ; (i+j+k+l+2)/4 = (s+t+1)/2 - (ij|kl)&st | ||
573 : | ; (i+j+k+l+1)/4 = (s+t+1)/2 - (ij&kl)|st | ||
574 : | ; (i+j+k+l+0)/4 = (s+t+1)/2 - (ij|kl)|st | ||
575 : | ; with s=(i+j+1)/2, t=(k+l+1)/2, ij = i^j, kl = k^l, st = s^t. | ||
576 : | |||
; Moreover, we process two lines at a time, for better overlapping (~15% faster).
578 : | |||
579 : | ;=========================================================================== | ||
580 : | ; | ||
581 : | ; void interpolate8x8_halfpel_hv_add_x86_64(uint8_t * const dst, | ||
582 : | ; const uint8_t * const src, | ||
583 : | ; const uint32_t stride, | ||
584 : | ; const uint32_t rounding); | ||
585 : | ; | ||
586 : | ; | ||
587 : | ;=========================================================================== | ||
588 : | |||
;-----------------------------------------------------------------------------
; Diagonal (hv) halfpel, rounding==0, then average into dst; two output rows.
; Same correction scheme as COPY_HV_SSE_RND0, with an extra pavgb against
; dst before each store.
; In:  rax = src row, rcx = dst, rdx = stride, mm7 = mmx_one,
;      mm2 = (i+j+1)/2 and mm3 = i^j of the previous row (loop invariants).
; Out: mm2/mm3 refreshed; rax advanced 2 rows, rcx 1 row (caller adds the
;      other).  Clobbers mm0, mm1, mm6.
;-----------------------------------------------------------------------------
%macro ADD_HH_RND0 0
	lea rax,[rax+rdx]

	movq mm0, [rax]
	movq mm1, [rax+1]

	movq mm6, mm0
	pavgb mm0, mm1		; mm0=(j+k+1)/2. preserved for next step
	lea rax,[rax+rdx]
	pxor mm1, mm6		; mm1=(j^k). preserved for next step

	por mm3, mm1		; ij |= jk
	movq mm6, mm2
	pxor mm6, mm0		; mm6 = s^t
	pand mm3, mm6		; (ij|jk) &= st
	pavgb mm2, mm0		; mm2 = (s+t+1)/2
	pand mm3, mm7		; mask lsb
	psubb mm2, mm3		; apply.

	pavgb mm2, [rcx]	; blend into dst
	movq [rcx], mm2

	movq mm2, [rax]
	movq mm3, [rax+1]
	movq mm6, mm2
	pavgb mm2, mm3		; preserved for next iteration
	lea rcx,[rcx+rdx]
	pxor mm3, mm6		; preserved for next iteration

	por mm1, mm3		; same correction, second output row
	movq mm6, mm0
	pxor mm6, mm2
	pand mm1, mm6
	pavgb mm0, mm2

	pand mm1, mm7
	psubb mm0, mm1

	pavgb mm0, [rcx]	; blend into dst
	movq [rcx], mm0
%endmacro
630 : | |||
;-----------------------------------------------------------------------------
; Diagonal (hv) halfpel, rounding==1, then average into dst; two output rows.
; Same correction scheme as COPY_HV_SSE_RND1 (AND/OR swapped vs. RND0),
; with an extra pavgb against dst before each store.
; In/out contract identical to ADD_HH_RND0.
;-----------------------------------------------------------------------------
%macro ADD_HH_RND1 0
	lea rax,[rax+rdx]

	movq mm0, [rax]
	movq mm1, [rax+1]

	movq mm6, mm0
	pavgb mm0, mm1		; mm0=(j+k+1)/2. preserved for next step
	lea rax,[rax+rdx]
	pxor mm1, mm6		; mm1=(j^k). preserved for next step

	pand mm3, mm1		; ij &= jk
	movq mm6, mm2
	pxor mm6, mm0		; mm6 = s^t
	por mm3, mm6		; (ij&jk) |= st
	pavgb mm2, mm0		; mm2 = (s+t+1)/2
	pand mm3, mm7		; mask lsb
	psubb mm2, mm3		; apply.

	pavgb mm2, [rcx]	; blend into dst
	movq [rcx], mm2

	movq mm2, [rax]
	movq mm3, [rax+1]
	movq mm6, mm2
	pavgb mm2, mm3		; preserved for next iteration
	lea rcx,[rcx+rdx]
	pxor mm3, mm6		; preserved for next iteration

	pand mm1, mm3		; same correction, second output row
	movq mm6, mm0
	pxor mm6, mm2
	por mm1, mm6
	pavgb mm0, mm2
	pand mm1, mm7
	psubb mm0, mm1

	pavgb mm0, [rcx]	; blend into dst
	movq [rcx], mm0
%endmacro
671 : | |||
ALIGN 16
;-----------------------------------------------------------------------------
; void interpolate8x8_halfpel_hv_add_x86_64(uint8_t *dst, const uint8_t *src,
;                                           uint32_t stride, uint32_t rounding)
; Diagonal (h+v) halfpel interpolation combined with averaging into dst.
; Clobbers: rax, rcx, r8, mm0-mm3, mm6, mm7, flags.
;-----------------------------------------------------------------------------
interpolate8x8_halfpel_hv_add_x86_64:
	PROLOG1

	movq mm7, [mmx_one wrt rip]

	; loop invariants: mm2=(i+j+1)/2 and mm3= i^j
	movq mm2, [rax]
	movq mm3, [rax+1]
	movq mm6, mm2
	pavgb mm2, mm3
	pxor mm3, mm6		; mm2/mm3 ready

	; MMX movq/pavgb/pxor never write EFLAGS, so the ZF from PROLOG1's
	; `test` is still valid here.
	jnz near .Loop1

	ADD_HH_RND0
	add rcx, rdx		; macro already advanced rcx by one row
	ADD_HH_RND0
	add rcx, rdx
	ADD_HH_RND0
	add rcx, rdx
	ADD_HH_RND0
	EPILOG

.Loop1
	ADD_HH_RND1
	add rcx, rdx
	ADD_HH_RND1
	add rcx, rdx
	ADD_HH_RND1
	add rcx, rdx
	ADD_HH_RND1

	EPILOG
.endfunc
No admin address has been configured | ViewVC Help |
Powered by ViewVC 1.0.4 |