;/*****************************************************************************
; *
; * XVID MPEG-4 VIDEO CODEC
; * - Quarter-pixel interpolation -
; * Copyright(C) 2002 Pascal Massimino <skal@planet-d.net>
; *
; * This file is part of XviD, a free MPEG-4 video encoder/decoder
; *
; * XviD is free software; you can redistribute it and/or modify it
; * under the terms of the GNU General Public License as published by
; * the Free Software Foundation; either version 2 of the License, or
; * (at your option) any later version.
; *
; * This program is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; * GNU General Public License for more details.
; *
; * You should have received a copy of the GNU General Public License
; * along with this program; if not, write to the Free Software
; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * Under section 8 of the GNU General Public License, the copyright
; * holders of XVID explicitly forbid distribution in the following
; * countries:
; *
; * - Japan
; * - United States of America
; *
; * Linking XviD statically or dynamically with other modules is making a
; * combined work based on XviD. Thus, the terms and conditions of the
; * GNU General Public License cover the whole combination.
; *
; * As a special exception, the copyright holders of XviD give you
; * permission to link XviD with independent modules that communicate with
; * XviD solely through the VFW1.1 and DShow interfaces, regardless of the
; * license terms of these independent modules, and to copy and distribute
; * the resulting combined work under terms of your choice, provided that
; * every copy of the combined work is accompanied by a complete copy of
; * the source code of XviD (the version of XviD used to produce the
; * combined work), being distributed under the terms of the GNU General
; * Public License plus this exception. An independent module is a module
; * which is not derived from or based on XviD.
; *
; * Note that people who make modified versions of XviD are not obligated
; * to grant this special exception for their modified versions; it is
; * their choice whether to do so. The GNU General Public License gives
; * permission to release a modified version without this exception; this
; * exception also makes it possible to release a modified version which
; * carries forward this exception.
; *
; * $Id: qpel_mmx.asm,v 1.1.2.1 2003-05-03 23:26:26 Isibaar Exp $
; *
; *************************************************************************/

;/**************************************************************************
; *
; * History:
; *
; * 22.10.2002  initial coding. unoptimized 'proof of concept',
; *             just to heft the qpel filtering. - Skal -
; *
; *************************************************************************/


%define USE_TABLES      ; in order to use xvid_FIR_x_x_x_x tables
                        ; instead of xvid_Expand_mmx...


bits 32

%macro cglobal 1
  %ifdef PREFIX
    global _%1
    %define %1 _%1
  %else
    global %1
  %endif
%endmacro
%macro cextern 1
  %ifdef PREFIX
    extern _%1
    %define %1 _%1
  %else
    extern %1
  %endif
%endmacro


;//////////////////////////////////////////////////////////////////////
;// Declarations
;//   all signatures are:
;//     void XXX(uint8_t *dst, const uint8_t *src,
;//              int32_t length, int32_t stride, int32_t rounding)
;//////////////////////////////////////////////////////////////////////
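;// e.g. a matching C prototype (illustration only, based on the signature
;// stated above) would be:
;//   extern void xvid_H_Pass_16_mmx(uint8_t *dst, const uint8_t *src,
;//                                  int32_t length, int32_t stride, int32_t rounding);
;// 'length' appears to be the number of output rows for the horizontal
;// passes and the number of output columns (a multiple of 4) for the
;// vertical ones (see the "dec ecx" / "sub ecx, 4" loop counters below).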

cglobal xvid_H_Pass_16_mmx
cglobal xvid_H_Pass_Avrg_16_mmx
cglobal xvid_H_Pass_Avrg_Up_16_mmx
cglobal xvid_V_Pass_16_mmx
cglobal xvid_V_Pass_Avrg_16_mmx
cglobal xvid_V_Pass_Avrg_Up_16_mmx
cglobal xvid_H_Pass_8_mmx
cglobal xvid_H_Pass_Avrg_8_mmx
cglobal xvid_H_Pass_Avrg_Up_8_mmx
cglobal xvid_V_Pass_8_mmx
cglobal xvid_V_Pass_Avrg_8_mmx
cglobal xvid_V_Pass_Avrg_Up_8_mmx

cglobal xvid_H_Pass_Add_16_mmx
cglobal xvid_H_Pass_Avrg_Add_16_mmx
cglobal xvid_H_Pass_Avrg_Up_Add_16_mmx
cglobal xvid_V_Pass_Add_16_mmx
cglobal xvid_V_Pass_Avrg_Add_16_mmx
cglobal xvid_V_Pass_Avrg_Up_Add_16_mmx
cglobal xvid_H_Pass_8_Add_mmx
cglobal xvid_H_Pass_Avrg_8_Add_mmx
cglobal xvid_H_Pass_Avrg_Up_8_Add_mmx
cglobal xvid_V_Pass_8_Add_mmx
cglobal xvid_V_Pass_Avrg_8_Add_mmx
cglobal xvid_V_Pass_Avrg_Up_8_Add_mmx

cextern xvid_Expand_mmx

%ifdef USE_TABLES

cextern xvid_FIR_1_0_0_0
cextern xvid_FIR_3_1_0_0
cextern xvid_FIR_6_3_1_0
cextern xvid_FIR_14_3_2_1
cextern xvid_FIR_20_6_3_1
cextern xvid_FIR_20_20_6_3
cextern xvid_FIR_23_19_6_3
cextern xvid_FIR_7_20_20_6
cextern xvid_FIR_6_20_20_6
cextern xvid_FIR_6_20_20_7
cextern xvid_FIR_3_6_20_20
cextern xvid_FIR_3_6_19_23
cextern xvid_FIR_1_3_6_20
cextern xvid_FIR_1_2_3_14
cextern xvid_FIR_0_1_3_6
cextern xvid_FIR_0_0_1_3
cextern xvid_FIR_0_0_0_1

%endif

;//////////////////////////////////////////////////////////////////////

section .data

align 16
Rounder1_MMX:
  times 4 dw 1
Rounder0_MMX:
  times 4 dw 0

align 16
Rounder_QP_MMX:
  times 4 dw 16
  times 4 dw 15
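
; Rounding selection (see the prologues below): the FIR rounder is loaded
; as [Rounder_QP_MMX + (Rnd&1)*8], i.e. 16 when rounding==0 and 15 when
; rounding==1; likewise [Rounder1_MMX + (Rnd&1)*8] picks the 1/0 rounder
; used by the averaging steps.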

%ifndef USE_TABLES

align 16

; H-Pass table shared by 16x? and 8x? filters

FIR_R0:  dw 14, -3, 2, -1
align 16
FIR_R1:  dw 23, 19, -6, 3, -1, 0, 0, 0

FIR_R2:  dw -7, 20, 20, -6, 3, -1, 0, 0

FIR_R3:  dw 3, -6, 20, 20, -6, 3, -1, 0

FIR_R4:  dw -1, 3, -6, 20, 20, -6, 3, -1

FIR_R5:  dw 0, -1, 3, -6, 20, 20, -6, 3, -1, 0, 0, 0
align 16
FIR_R6:  dw 0, 0, -1, 3, -6, 20, 20, -6, 3, -1, 0, 0
align 16
FIR_R7:  dw 0, 0, 0, -1, 3, -6, 20, 20, -6, 3, -1, 0
align 16
FIR_R8:  dw -1, 3, -6, 20, 20, -6, 3, -1

FIR_R9:  dw 0, -1, 3, -6, 20, 20, -6, 3, -1, 0, 0, 0
align 16
FIR_R10: dw 0, 0, -1, 3, -6, 20, 20, -6, 3, -1, 0, 0
align 16
FIR_R11: dw 0, 0, 0, -1, 3, -6, 20, 20, -6, 3, -1, 0
align 16
FIR_R12: dw -1, 3, -6, 20, 20, -6, 3, -1

FIR_R13: dw 0, -1, 3, -6, 20, 20, -6, 3

FIR_R14: dw 0, 0, -1, 3, -6, 20, 20, -7

FIR_R15: dw 0, 0, 0, -1, 3, -6, 19, 23

FIR_R16: dw -1, 2, -3, 14

%endif  ; !USE_TABLES

; V-Pass taps

align 16
FIR_Cm7: times 4 dw -7
FIR_Cm6: times 4 dw -6
FIR_Cm3: times 4 dw -3
FIR_Cm1: times 4 dw -1
FIR_C2:  times 4 dw 2
FIR_C3:  times 4 dw 3
FIR_C14: times 4 dw 14
FIR_C19: times 4 dw 19
FIR_C20: times 4 dw 20
FIR_C23: times 4 dw 23

section .text

;//////////////////////////////////////////////////////////////////////
;// Here we go with the Q-Pel mess.
;// For horizontal passes, we process 4 *output* pixels in parallel.
;// For vertical ones, we process 4 *input* pixels in parallel.
;//////////////////////////////////////////////////////////////////////
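;//
;// For reference, a plain-C sketch of what one 16-pixel horizontal row
;// computes (an illustration derived from the taps/tables above, not part
;// of the original file): the 8-tap kernel is {-1,3,-6,20,20,-6,3,-1},
;// taps falling outside the 17 source pixels are mirrored back into the
;// block, and the sum is biased by the 16/15 rounder, scaled and clamped:
;//
;//   static uint8_t clamp255(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }
;//
;//   static void h_row_16_ref(uint8_t *dst, const uint8_t *src, int rounding)
;//   {
;//     static const int tap[8] = { -1, 3, -6, 20, 20, -6, 3, -1 };
;//     for (int j = 0; j < 16; j++) {
;//       int sum = 16 - (rounding & 1);     /* Rounder_QP_MMX */
;//       for (int k = 0; k < 8; k++) {
;//         int p = j + k - 3;               /* tap position within src[0..16] */
;//         if (p < 0)  p = -1 - p;          /* mirror the left border  */
;//         if (p > 16) p = 33 - p;          /* mirror the right border */
;//         sum += tap[k] * src[p];
;//       }
;//       dst[j] = clamp255(sum >> 5);
;//     }
;//   }
;//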

%macro PROLOG_NO_AVRG 0
  push esi
  push edi
  push ebp
  mov edi, [esp+16 + 0*4]   ; Dst
  mov esi, [esp+16 + 1*4]   ; Src
  mov ecx, [esp+16 + 2*4]   ; Size
  mov ebp, [esp+16 + 3*4]   ; BpS
  mov eax, [esp+16 + 4*4]   ; Rnd
  and eax, 1
  movq mm7, [Rounder_QP_MMX+eax*8]   ; rounder
%endmacro

%macro EPILOG_NO_AVRG 0
  pop ebp
  pop edi
  pop esi
  ret
%endmacro

%macro PROLOG_AVRG 0
  push ebx
  push esi
  push edi
  push ebp
  mov edi, [esp+20 + 0*4]   ; Dst
  mov esi, [esp+20 + 1*4]   ; Src
  mov ecx, [esp+20 + 2*4]   ; Size
  mov ebp, [esp+20 + 3*4]   ; BpS
  mov eax, [esp+20 + 4*4]   ; Rnd
  and eax, 1
  movq mm7, [Rounder_QP_MMX+eax*8]   ; rounder
  lea ebx, [Rounder1_MMX+eax*8]      ; *Rounder2
%endmacro

%macro EPILOG_AVRG 0
  pop ebp
  pop edi
  pop esi
  pop ebx
  ret
%endmacro

;//////////////////////////////////////////////////////////////////////
;//
;// All horizontal passes
;//
;//////////////////////////////////////////////////////////////////////

; macros for USE_TABLES

%macro TLOAD 2   ; %1,%2: src pixels
  movzx eax, byte [esi+%1]
  movzx edx, byte [esi+%2]
  movq mm0, [xvid_FIR_14_3_2_1 + eax*8]
  movq mm3, [xvid_FIR_1_2_3_14 + edx*8]
  paddw mm0, mm7
  paddw mm3, mm7
%endmacro

%macro TACCUM2 5   ; %1: src pixel / %2-%3: taps tables / %4-%5: dst regs
  movzx eax, byte [esi+%1]
  paddw %4, [%2 + eax*8]
  paddw %5, [%3 + eax*8]
%endmacro

%macro TACCUM3 7   ; %1: src pixel / %2-%4: taps tables / %5-%7: dst regs
  movzx eax, byte [esi+%1]
  paddw %5, [%2 + eax*8]
  paddw %6, [%3 + eax*8]
  paddw %7, [%4 + eax*8]
%endmacro
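
; The xvid_FIR_a_b_c_d tables referenced above are assumed to hold, for
; every byte value v (0..255), the four products {a*v, b*v, c*v, d*v} as
; packed words (the minus signs of the kernel are dropped in the names),
; so a single table load replaces an expand + pmullw pair. They could be
; built with something like (illustration only):
;
;   /* int16_t xvid_FIR_23_19_6_3[256][4];  taps: 23, 19, -6, 3 */
;   for (int v = 0; v < 256; v++) {
;     xvid_FIR_23_19_6_3[v][0] = (int16_t)(23 * v);
;     xvid_FIR_23_19_6_3[v][1] = (int16_t)(19 * v);
;     xvid_FIR_23_19_6_3[v][2] = (int16_t)(-6 * v);
;     xvid_FIR_23_19_6_3[v][3] = (int16_t)( 3 * v);
;   }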

;//////////////////////////////////////////////////////////////////////

; macros without USE_TABLES

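; xvid_Expand_mmx is assumed to map each byte value v to the quadword
; {v, v, v, v}, so that one pmullw can apply four different taps to the
; same source pixel.
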
%macro LOAD 2   ; %1,%2: src pixels
  movzx eax, byte [esi+%1]
  movzx edx, byte [esi+%2]
  movq mm0, [xvid_Expand_mmx + eax*8]
  movq mm3, [xvid_Expand_mmx + edx*8]
  pmullw mm0, [FIR_R0]
  pmullw mm3, [FIR_R16]
  paddw mm0, mm7
  paddw mm3, mm7
%endmacro

%macro ACCUM2 4   ; src pixel / taps / dst regs #1-#2
  movzx eax, byte [esi+%1]
  movq mm4, [xvid_Expand_mmx + eax*8]
  movq mm5, mm4
  pmullw mm4, [%2]
  pmullw mm5, [%2+8]
  paddw %3, mm4
  paddw %4, mm5
%endmacro

%macro ACCUM3 5   ; src pixel / taps / dst regs #1-#2-#3
  movzx eax, byte [esi+%1]
  movq mm4, [xvid_Expand_mmx + eax*8]
  movq mm5, mm4
  movq mm6, mm5
  pmullw mm4, [%2]
  pmullw mm5, [%2+8]
  pmullw mm6, [%2+16]
  paddw %3, mm4
  paddw %4, mm5
  paddw %5, mm6
%endmacro

;//////////////////////////////////////////////////////////////////////

%macro MIX 3   ; %1:reg, %2:src, %3:rounder
  pxor mm6, mm6
  movq mm4, [%2]
  movq mm1, %1
  movq mm5, mm4
  punpcklbw %1, mm6
  punpcklbw mm4, mm6
  punpckhbw mm1, mm6
  punpckhbw mm5, mm6
  movq mm6, [%3]   ; rounder #2
  paddusw %1, mm4
  paddusw mm1, mm5
  paddusw %1, mm6
  paddusw mm1, mm6
  psrlw %1, 1
  psrlw mm1, 1
  packuswb %1, mm1
%endmacro
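
; MIX averages the 8 freshly filtered pixels in %1 with 8 bytes read from
; [%2] (the source row for the 'Avrg'/'Avrg_Up' variants, or the current
; destination for the '_Add_' variants): byte-wise %1 = (%1 + [%2] + r) >> 1,
; where r is the 1/0 rounder passed in %3.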

;//////////////////////////////////////////////////////////////////////

%macro H_PASS_16 2   ; %1:src-op (0=NONE,1=AVRG,2=AVRG-UP), %2:dst-op (NONE/AVRG)

%if (%2==0) && (%1==0)
  PROLOG_NO_AVRG
%else
  PROLOG_AVRG
%endif

.Loop:

  ; mm0..mm3 serve as a 4x4 delay line

%ifndef USE_TABLES

  LOAD 0, 16   ; special case for first/last pixel
  movq mm1, mm7
  movq mm2, mm7

  ACCUM2  1, FIR_R1,  mm0, mm1
  ACCUM2  2, FIR_R2,  mm0, mm1
  ACCUM2  3, FIR_R3,  mm0, mm1
  ACCUM2  4, FIR_R4,  mm0, mm1

  ACCUM3  5, FIR_R5,  mm0, mm1, mm2
  ACCUM3  6, FIR_R6,  mm0, mm1, mm2
  ACCUM3  7, FIR_R7,  mm0, mm1, mm2
  ACCUM2  8, FIR_R8,  mm1, mm2
  ACCUM3  9, FIR_R9,  mm1, mm2, mm3
  ACCUM3 10, FIR_R10, mm1, mm2, mm3
  ACCUM3 11, FIR_R11, mm1, mm2, mm3

  ACCUM2 12, FIR_R12, mm2, mm3
  ACCUM2 13, FIR_R13, mm2, mm3
  ACCUM2 14, FIR_R14, mm2, mm3
  ACCUM2 15, FIR_R15, mm2, mm3

%else

  TLOAD 0, 16   ; special case for first/last pixel
  movq mm1, mm7
  movq mm2, mm7

  TACCUM2 1, xvid_FIR_23_19_6_3, xvid_FIR_1_0_0_0,  mm0, mm1
  TACCUM2 2, xvid_FIR_7_20_20_6, xvid_FIR_3_1_0_0,  mm0, mm1
  TACCUM2 3, xvid_FIR_3_6_20_20, xvid_FIR_6_3_1_0,  mm0, mm1
  TACCUM2 4, xvid_FIR_1_3_6_20,  xvid_FIR_20_6_3_1, mm0, mm1

  TACCUM3 5, xvid_FIR_0_1_3_6, xvid_FIR_20_20_6_3, xvid_FIR_1_0_0_0, mm0, mm1, mm2
  TACCUM3 6, xvid_FIR_0_0_1_3, xvid_FIR_6_20_20_6, xvid_FIR_3_1_0_0, mm0, mm1, mm2
  TACCUM3 7, xvid_FIR_0_0_0_1, xvid_FIR_3_6_20_20, xvid_FIR_6_3_1_0, mm0, mm1, mm2

  TACCUM2 8, xvid_FIR_1_3_6_20, xvid_FIR_20_6_3_1, mm1, mm2

  TACCUM3  9, xvid_FIR_0_1_3_6, xvid_FIR_20_20_6_3, xvid_FIR_1_0_0_0, mm1, mm2, mm3
  TACCUM3 10, xvid_FIR_0_0_1_3, xvid_FIR_6_20_20_6, xvid_FIR_3_1_0_0, mm1, mm2, mm3
  TACCUM3 11, xvid_FIR_0_0_0_1, xvid_FIR_3_6_20_20, xvid_FIR_6_3_1_0, mm1, mm2, mm3

  TACCUM2 12, xvid_FIR_1_3_6_20, xvid_FIR_20_6_3_1,  mm2, mm3
  TACCUM2 13, xvid_FIR_0_1_3_6,  xvid_FIR_20_20_6_3, mm2, mm3
  TACCUM2 14, xvid_FIR_0_0_1_3,  xvid_FIR_6_20_20_7, mm2, mm3
  TACCUM2 15, xvid_FIR_0_0_0_1,  xvid_FIR_3_6_19_23, mm2, mm3

%endif

  psraw mm0, 5
  psraw mm1, 5
  psraw mm2, 5
  psraw mm3, 5
  packuswb mm0, mm1
  packuswb mm2, mm3

%if (%1==1)
  MIX mm0, esi, ebx
%elif (%1==2)
  MIX mm0, esi+1, ebx
%endif
%if (%2==1)
  MIX mm0, edi, Rounder1_MMX
%endif

%if (%1==1)
  MIX mm2, esi+8, ebx
%elif (%1==2)
  MIX mm2, esi+9, ebx
%endif
%if (%2==1)
  MIX mm2, edi+8, Rounder1_MMX
%endif

  lea esi, [esi+ebp]

  movq [edi+0], mm0
  movq [edi+8], mm2

  add edi, ebp
  dec ecx
  jg .Loop

%if (%2==0) && (%1==0)
  EPILOG_NO_AVRG
%else
  EPILOG_AVRG
%endif

%endmacro


;//////////////////////////////////////////////////////////////////////

%macro H_PASS_8 2   ; %1:src-op (0=NONE,1=AVRG,2=AVRG-UP), %2:dst-op (NONE/AVRG)

%if (%2==0) && (%1==0)
  PROLOG_NO_AVRG
%else
  PROLOG_AVRG
%endif

.Loop:
  ; mm0..mm3 serve as a 4x4 delay line

%ifndef USE_TABLES

  LOAD 0, 8   ; special case for first/last pixel
  ACCUM2 1, FIR_R1,  mm0, mm3
  ACCUM2 2, FIR_R2,  mm0, mm3
  ACCUM2 3, FIR_R3,  mm0, mm3
  ACCUM2 4, FIR_R4,  mm0, mm3

  ACCUM2 5, FIR_R13, mm0, mm3
  ACCUM2 6, FIR_R14, mm0, mm3
  ACCUM2 7, FIR_R15, mm0, mm3

%else

%if 0   ; test with no unrolling

  TLOAD 0, 8   ; special case for first/last pixel
  TACCUM2 1, xvid_FIR_23_19_6_3, xvid_FIR_1_0_0_0,   mm0, mm3
  TACCUM2 2, xvid_FIR_7_20_20_6, xvid_FIR_3_1_0_0,   mm0, mm3
  TACCUM2 3, xvid_FIR_3_6_20_20, xvid_FIR_6_3_1_0,   mm0, mm3
  TACCUM2 4, xvid_FIR_1_3_6_20,  xvid_FIR_20_6_3_1,  mm0, mm3
  TACCUM2 5, xvid_FIR_0_1_3_6,   xvid_FIR_20_20_6_3, mm0, mm3
  TACCUM2 6, xvid_FIR_0_0_1_3,   xvid_FIR_6_20_20_7, mm0, mm3
  TACCUM2 7, xvid_FIR_0_0_0_1,   xvid_FIR_3_6_19_23, mm0, mm3

%else   ; test with unrolling (a little faster, but not much)

  movzx eax, byte [esi]
  movzx edx, byte [esi+8]
  movq mm0, [xvid_FIR_14_3_2_1 + eax*8]
  movzx eax, byte [esi+1]
  movq mm3, [xvid_FIR_1_2_3_14 + edx*8]
  paddw mm0, mm7
  paddw mm3, mm7

  movzx edx, byte [esi+2]
  paddw mm0, [xvid_FIR_23_19_6_3 + eax*8]
  paddw mm3, [xvid_FIR_1_0_0_0   + eax*8]

  movzx eax, byte [esi+3]
  paddw mm0, [xvid_FIR_7_20_20_6 + edx*8]
  paddw mm3, [xvid_FIR_3_1_0_0   + edx*8]

  movzx edx, byte [esi+4]
  paddw mm0, [xvid_FIR_3_6_20_20 + eax*8]
  paddw mm3, [xvid_FIR_6_3_1_0   + eax*8]

  movzx eax, byte [esi+5]
  paddw mm0, [xvid_FIR_1_3_6_20  + edx*8]
  paddw mm3, [xvid_FIR_20_6_3_1  + edx*8]

  movzx edx, byte [esi+6]
  paddw mm0, [xvid_FIR_0_1_3_6   + eax*8]
  paddw mm3, [xvid_FIR_20_20_6_3 + eax*8]

  movzx eax, byte [esi+7]
  paddw mm0, [xvid_FIR_0_0_1_3   + edx*8]
  paddw mm3, [xvid_FIR_6_20_20_7 + edx*8]

  paddw mm0, [xvid_FIR_0_0_0_1   + eax*8]
  paddw mm3, [xvid_FIR_3_6_19_23 + eax*8]

%endif

%endif   ; !USE_TABLES

  psraw mm0, 5
  psraw mm3, 5
  packuswb mm0, mm3

%if (%1==1)
  MIX mm0, esi, ebx
%elif (%1==2)
  MIX mm0, esi+1, ebx
%endif
%if (%2==1)
  MIX mm0, edi, Rounder1_MMX
%endif

  movq [edi], mm0

  add edi, ebp
  add esi, ebp
  dec ecx
  jg .Loop

%if (%2==0) && (%1==0)
  EPILOG_NO_AVRG
%else
  EPILOG_AVRG
%endif

%endmacro

;//////////////////////////////////////////////////////////////////////
;// 16x? copy Functions

xvid_H_Pass_16_mmx:
  H_PASS_16 0, 0
xvid_H_Pass_Avrg_16_mmx:
  H_PASS_16 1, 0
xvid_H_Pass_Avrg_Up_16_mmx:
  H_PASS_16 2, 0

;//////////////////////////////////////////////////////////////////////
;// 8x? copy Functions

xvid_H_Pass_8_mmx:
  H_PASS_8 0, 0
xvid_H_Pass_Avrg_8_mmx:
  H_PASS_8 1, 0
xvid_H_Pass_Avrg_Up_8_mmx:
  H_PASS_8 2, 0

;//////////////////////////////////////////////////////////////////////
;// 16x? avrg Functions

xvid_H_Pass_Add_16_mmx:
  H_PASS_16 0, 1
xvid_H_Pass_Avrg_Add_16_mmx:
  H_PASS_16 1, 1
xvid_H_Pass_Avrg_Up_Add_16_mmx:
  H_PASS_16 2, 1

;//////////////////////////////////////////////////////////////////////
;// 8x? avrg Functions

xvid_H_Pass_8_Add_mmx:
  H_PASS_8 0, 1
xvid_H_Pass_Avrg_8_Add_mmx:
  H_PASS_8 1, 1
xvid_H_Pass_Avrg_Up_8_Add_mmx:
  H_PASS_8 2, 1



;//////////////////////////////////////////////////////////////////////
;//
;// All vertical passes
;//
;//////////////////////////////////////////////////////////////////////

%macro V_LOAD 1   ; %1=Last?

  movd mm4, [edx]
  pxor mm6, mm6
%if (%1==0)
  add edx, ebp
%endif
  punpcklbw mm4, mm6

%endmacro

%macro V_ACC1 2   ; %1:reg, %2:tap
  pmullw mm4, [%2]
  paddw %1, mm4
%endmacro

%macro V_ACC2 4   ; %1-%2: regs, %3-%4: taps
  movq mm5, mm4
  movq mm6, mm4
  pmullw mm5, [%3]
  pmullw mm6, [%4]
  paddw %1, mm5
  paddw %2, mm6
%endmacro

%macro V_ACC2l 4   ; %1-%2: regs, %3-%4: taps
  movq mm5, mm4
  pmullw mm5, [%3]
  pmullw mm4, [%4]
  paddw %1, mm5
  paddw %2, mm4
%endmacro

%macro V_ACC4 8   ; %1-%4: regs, %5-%8: taps
  V_ACC2  %1,%2, %5,%6
  V_ACC2l %3,%4, %7,%8
%endmacro


%macro V_MIX 3   ; %1:dst-reg, %2:src, %3:rounder
  pxor mm6, mm6
  movq mm4, [%2]
  punpcklbw %1, mm6
  punpcklbw mm4, mm6
  paddusw %1, mm4
  paddusw %1, [%3]
  psrlw %1, 1
  packuswb %1, %1
%endmacro
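
; V_MIX averages the 4 low bytes of %1 with 4 bytes read from [%2]:
; byte-wise %1 = (%1 + [%2] + r) >> 1, with the rounder r taken from %3.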

%macro V_STORE 4   ; %1-%2: mix ops, %3: reg, %4: last?

  psraw %3, 5
  packuswb %3, %3

%if (%1==1)
  V_MIX %3, esi, ebx
  add esi, ebp
%elif (%1==2)
  add esi, ebp
  V_MIX %3, esi, ebx
%endif
%if (%2==1)
  V_MIX %3, edi, Rounder1_MMX
%endif

  movd eax, %3
  mov [edi], eax

%if (%4==0)
  add edi, ebp
%endif

%endmacro
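
; V_STORE scales (>>5), clamps and writes 4 output bytes; the %1/%2 mix ops
; select the optional averaging with the source column (the 'Avrg'/'Avrg_Up'
; variants) and/or with the bytes already present in the destination (the
; '_Add_' variants).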

;//////////////////////////////////////////////////////////////////////

%macro V_PASS_16 2   ; %1:src-op (0=NONE,1=AVRG,2=AVRG-UP), %2:dst-op (NONE/AVRG)

%if (%2==0) && (%1==0)
  PROLOG_NO_AVRG
%else
  PROLOG_AVRG
%endif

  ; we process one stripe of 4x16 pixels each time.
  ; the size (3rd argument) is meant to be a multiple of 4
  ; mm0..mm3 serve as a 4x4 delay line
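  ;
  ; For reference (illustration only, same kernel as the horizontal case):
  ; each output pixel is
  ;   dst[y][x] = clamp((rounder + sum_k tap[k]*src[y+k-3][x]) >> 5)
  ; with tap[] = {-1,3,-6,20,20,-6,3,-1} and out-of-range rows mirrored back
  ; into the block; the special 14/23/19/-7 taps used below encode that
  ; mirroring at the top and bottom borders.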

.Loop:

  push edi
  push esi        ; esi is preserved for src-mixing
  mov edx, esi

  ; output rows [0..3], from input rows [0..8]

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C14, FIR_Cm3, FIR_C2,  FIR_Cm1
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C23, FIR_C19, FIR_Cm6, FIR_C3
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_Cm7, FIR_C20, FIR_C20, FIR_Cm6
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C3,  FIR_Cm6, FIR_C20, FIR_C20
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_Cm1, FIR_C3,  FIR_Cm6, FIR_C20
  V_STORE %1, %2, mm0, 0

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1, FIR_C3
  V_ACC1 mm3, FIR_Cm6
  V_STORE %1, %2, mm1, 0

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1, FIR_C3
  V_STORE %1, %2, mm2, 0

  V_LOAD 1
  V_ACC1 mm3, FIR_Cm1
  V_STORE %1, %2, mm3, 0

  ; output rows [4..7], from input rows [1..11] (!!)

  mov esi, [esp]
  lea edx, [esi+ebp]

  lea esi, [esi+4*ebp]   ; for src-mixing
  push esi               ; this will be the new value for next round

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC1 mm0, FIR_Cm1

  V_LOAD 0
  V_ACC2l mm0, mm1, FIR_C3, FIR_Cm1

  V_LOAD 0
  V_ACC2 mm0, mm1, FIR_Cm6, FIR_C3
  V_ACC1 mm2, FIR_Cm1

  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C20, FIR_Cm6, FIR_C3,  FIR_Cm1
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C20, FIR_C20, FIR_Cm6, FIR_C3
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_Cm6, FIR_C20, FIR_C20, FIR_Cm6
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C3,  FIR_Cm6, FIR_C20, FIR_C20
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_Cm1, FIR_C3,  FIR_Cm6, FIR_C20
  V_STORE %1, %2, mm0, 0

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1, FIR_C3
  V_ACC1 mm3, FIR_Cm6
  V_STORE %1, %2, mm1, 0

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1, FIR_C3
  V_STORE %1, %2, mm2, 0

  V_LOAD 1
  V_ACC1 mm3, FIR_Cm1
  V_STORE %1, %2, mm3, 0

  ; output rows [8..11], from input rows [5..15]

  pop esi
  lea edx, [esi+ebp]

  lea esi, [esi+4*ebp]   ; for src-mixing
  push esi               ; this will be the new value for next round

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC1 mm0, FIR_Cm1

  V_LOAD 0
  V_ACC2l mm0, mm1, FIR_C3, FIR_Cm1

  V_LOAD 0
  V_ACC2 mm0, mm1, FIR_Cm6, FIR_C3
  V_ACC1 mm2, FIR_Cm1

  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C20, FIR_Cm6, FIR_C3,  FIR_Cm1
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C20, FIR_C20, FIR_Cm6, FIR_C3
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_Cm6, FIR_C20, FIR_C20, FIR_Cm6
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C3,  FIR_Cm6, FIR_C20, FIR_C20
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_Cm1, FIR_C3,  FIR_Cm6, FIR_C20

  V_STORE %1, %2, mm0, 0

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1, FIR_C3
  V_ACC1 mm3, FIR_Cm6
  V_STORE %1, %2, mm1, 0

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1, FIR_C3
  V_STORE %1, %2, mm2, 0

  V_LOAD 1
  V_ACC1 mm3, FIR_Cm1
  V_STORE %1, %2, mm3, 0


  ; output rows [12..15], from input rows [9..16]

  pop esi
  lea edx, [esi+ebp]

%if (%1!=0)
  lea esi, [esi+4*ebp]   ; for src-mixing
%endif

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC1 mm3, FIR_Cm1

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1, FIR_C3

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1, FIR_C3
  V_ACC1 mm3, FIR_Cm6

  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_Cm1, FIR_C3,  FIR_Cm6, FIR_C20
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C3,  FIR_Cm6, FIR_C20, FIR_C20
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_Cm7, FIR_C20, FIR_C20, FIR_Cm6
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C23, FIR_C19, FIR_Cm6, FIR_C3
  V_LOAD 1
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C14, FIR_Cm3, FIR_C2,  FIR_Cm1

  V_STORE %1, %2, mm3, 0
  V_STORE %1, %2, mm2, 0
  V_STORE %1, %2, mm1, 0
  V_STORE %1, %2, mm0, 1

  ; ... next 4 columns

  pop esi
  pop edi
  add esi, 4
  add edi, 4
  sub ecx, 4
  jg .Loop

%if (%2==0) && (%1==0)
  EPILOG_NO_AVRG
%else
  EPILOG_AVRG
%endif

%endmacro

;//////////////////////////////////////////////////////////////////////

%macro V_PASS_8 2   ; %1:src-op (0=NONE,1=AVRG,2=AVRG-UP), %2:dst-op (NONE/AVRG)

%if (%2==0) && (%1==0)
  PROLOG_NO_AVRG
%else
  PROLOG_AVRG
%endif

  ; we process one stripe of 4x8 pixels each time
  ; the size (3rd argument) is meant to be a multiple of 4
  ; mm0..mm3 serve as a 4x4 delay line
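  ; (same structure as V_PASS_16 above, but with only two groups of four
  ;  output rows per stripe)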
.Loop:

  push edi
  push esi        ; esi is preserved for src-mixing
  mov edx, esi

  ; output rows [0..3], from input rows [0..8]

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C14, FIR_Cm3, FIR_C2,  FIR_Cm1
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C23, FIR_C19, FIR_Cm6, FIR_C3
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_Cm7, FIR_C20, FIR_C20, FIR_Cm6
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C3,  FIR_Cm6, FIR_C20, FIR_C20
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_Cm1, FIR_C3,  FIR_Cm6, FIR_C20
  V_STORE %1, %2, mm0, 0

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1, FIR_C3
  V_ACC1 mm3, FIR_Cm6

  V_STORE %1, %2, mm1, 0

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1, FIR_C3
  V_STORE %1, %2, mm2, 0

  V_LOAD 1
  V_ACC1 mm3, FIR_Cm1
  V_STORE %1, %2, mm3, 0

  ; output rows [4..7], from input rows [1..9]

  mov esi, [esp]
  lea edx, [esi+ebp]

%if (%1!=0)
  lea esi, [esi+4*ebp]   ; for src-mixing
%endif

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC1 mm3, FIR_Cm1

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1, FIR_C3

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1, FIR_C3
  V_ACC1 mm3, FIR_Cm6

  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_Cm1, FIR_C3,  FIR_Cm6, FIR_C20
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C3,  FIR_Cm6, FIR_C20, FIR_C20
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_Cm7, FIR_C20, FIR_C20, FIR_Cm6
  V_LOAD 0
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C23, FIR_C19, FIR_Cm6, FIR_C3
  V_LOAD 1
  V_ACC4 mm0, mm1, mm2, mm3, FIR_C14, FIR_Cm3, FIR_C2,  FIR_Cm1

  V_STORE %1, %2, mm3, 0
  V_STORE %1, %2, mm2, 0
  V_STORE %1, %2, mm1, 0
  V_STORE %1, %2, mm0, 1

  ; ... next 4 columns

  pop esi
  pop edi
  add esi, 4
  add edi, 4
  sub ecx, 4
  jg .Loop

%if (%2==0) && (%1==0)
  EPILOG_NO_AVRG
%else
  EPILOG_AVRG
%endif

%endmacro


;//////////////////////////////////////////////////////////////////////
;// 16x? copy Functions

xvid_V_Pass_16_mmx:
  V_PASS_16 0, 0
xvid_V_Pass_Avrg_16_mmx:
  V_PASS_16 1, 0
xvid_V_Pass_Avrg_Up_16_mmx:
  V_PASS_16 2, 0

;//////////////////////////////////////////////////////////////////////
;// 8x? copy Functions

xvid_V_Pass_8_mmx:
  V_PASS_8 0, 0
xvid_V_Pass_Avrg_8_mmx:
  V_PASS_8 1, 0
xvid_V_Pass_Avrg_Up_8_mmx:
  V_PASS_8 2, 0

;//////////////////////////////////////////////////////////////////////
;// 16x? avrg Functions

xvid_V_Pass_Add_16_mmx:
  V_PASS_16 0, 1
xvid_V_Pass_Avrg_Add_16_mmx:
  V_PASS_16 1, 1
xvid_V_Pass_Avrg_Up_Add_16_mmx:
  V_PASS_16 2, 1

;//////////////////////////////////////////////////////////////////////
;// 8x? avrg Functions

xvid_V_Pass_8_Add_mmx:
  V_PASS_8 0, 1
xvid_V_Pass_Avrg_8_Add_mmx:
  V_PASS_8 1, 1
xvid_V_Pass_Avrg_Up_8_Add_mmx:
  V_PASS_8 2, 1

;//////////////////////////////////////////////////////////////////////