Parent Directory
|
Revision Log
Revision 1883 - (view) (download)
1 : | edgomez | 851 | ;/***************************************************************************** |
2 : | ; * | ||
3 : | ; * XVID MPEG-4 VIDEO CODEC | ||
4 : | edgomez | 1382 | ; * - Reduced-Resolution utilities - |
5 : | edgomez | 851 | ; * |
6 : | ; * Copyright(C) 2002 Pascal Massimino <skal@planet-d.net> | ||
7 : | ; * | ||
8 : | Isibaar | 1883 | ; * Xvid is free software; you can redistribute it and/or modify it |
9 : | edgomez | 851 | ; * under the terms of the GNU General Public License as published by |
10 : | ; * the Free Software Foundation; either version 2 of the License, or | ||
11 : | ; * (at your option) any later version. | ||
12 : | ; * | ||
13 : | ; * This program is distributed in the hope that it will be useful, | ||
14 : | ; * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 : | ; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 : | ; * GNU General Public License for more details. | ||
17 : | ; * | ||
18 : | ; * You should have received a copy of the GNU General Public License | ||
19 : | ; * along with this program; if not, write to the Free Software | ||
20 : | ; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 : | ; * | ||
22 : | Isibaar | 1883 | ; * $Id: reduced_mmx.asm,v 1.13 2010-03-09 10:00:14 Isibaar Exp $ |
23 : | edgomez | 851 | ; * |
24 : | ; *************************************************************************/ | ||
25 : | |||
26 : | Isibaar | 1795 | %include "nasm.inc" |
27 : | edgomez | 851 | |
28 : | ;=========================================================================== | ||
29 : | |||
30 : | Isibaar | 1795 | DATA |
31 : | edgomez | 851 | |
32 : | Isibaar | 1795 | align SECTION_ALIGN |
33 : | edgomez | 851 | Up31 dw 3, 1, 3, 1 |
34 : | Up13 dw 1, 3, 1, 3 | ||
35 : | Up93 dw 9, 3, 9, 3 | ||
36 : | Up39 dw 3, 9, 3, 9 | ||
37 : | Cst0 dw 0, 0, 0, 0 | ||
38 : | Cst2 dw 2, 2, 2, 2 | ||
39 : | Cst3 dw 3, 3, 3, 3 | ||
40 : | Cst32 dw 32,32,32,32 | ||
41 : | Cst2000 dw 2, 0, 0, 0 | ||
42 : | Cst0002 dw 0, 0, 0, 2 | ||
43 : | |||
44 : | Mask_ff dw 0xff,0xff,0xff,0xff | ||
45 : | |||
46 : | ;=========================================================================== | ||
47 : | |||
48 : | Isibaar | 1844 | TEXT |
49 : | edgomez | 851 | |
50 : | cglobal xvid_Copy_Upsampled_8x8_16To8_mmx | ||
51 : | cglobal xvid_Add_Upsampled_8x8_16To8_mmx | ||
52 : | cglobal xvid_Copy_Upsampled_8x8_16To8_xmm | ||
53 : | cglobal xvid_Add_Upsampled_8x8_16To8_xmm | ||
54 : | |||
55 : | cglobal xvid_HFilter_31_mmx | ||
56 : | cglobal xvid_VFilter_31_x86 | ||
57 : | cglobal xvid_HFilter_31_x86 | ||
58 : | |||
59 : | cglobal xvid_Filter_18x18_To_8x8_mmx | ||
60 : | cglobal xvid_Filter_Diff_18x18_To_8x8_mmx | ||
61 : | |||
62 : | |||
63 : | ;////////////////////////////////////////////////////////////////////// | ||
64 : | ;// 8x8 -> 16x16 upsampling (16b) | ||
65 : | ;////////////////////////////////////////////////////////////////////// | ||
66 : | |||
%macro MUL_PACK 4 ; %1/%2: regs  %3/%4: Up13/Up31 (reg or mem)
; Weighted horizontal blend of the column pairs prepared by COL03/COL47:
; %1/%2 receive the [1,3] weights, the companion regs mm4/mm5 the [3,1]
; weights, then both are summed with the '+2' rounder added.
; mm4/mm5 are implicit inputs (from COL03/COL47) and are consumed here.
  pmullw %1, %3   ; [Up13]
  pmullw mm4, %4  ; [Up31]
  pmullw %2, %3   ; [Up13]
  pmullw mm5, %4  ; [Up31]
  paddsw %1, [Cst2]
  paddsw %2, [Cst2]
  paddsw %1, mm4
  paddsw %2, mm5
%endmacro
77 : | |||
78 : | ; MMX-way of reordering columns... | ||
79 : | |||
%macro COL03 3 ;%1/%2: regs, %3: row -output: mm4/mm5
; MMX-way of reordering columns 0..3 of row %3 (Src rows are 16 bytes apart)
; into the shifted pairs needed by the 1:3/3:1 horizontal upsampler.
  movq %1, [TMP1+%3*16+0*2] ; %1 = 0|1|2|3
  movq %2,[TMP1+%3*16+1*2]  ; %2 = 1|2|3|4
  movq mm5, %1              ; mm5 = 0|1|2|3
  movq mm4, %1              ; mm4 = 0|1|2|3
  punpckhwd mm5,%2          ; mm5 = 2|3|3|4
  punpcklwd mm4,%2          ; mm4 = 0|1|1|2
  punpcklwd %1,%1           ; %1 = 0|0|1|1
  punpcklwd %2, mm5         ; %2 = 1|2|2|3
  punpcklwd %1, mm4         ; %1 = 0|0|0|1   (left edge replicates sample 0)
%endmacro
91 : | |||
%macro COL47 3 ;%1-%2: regs, %3: row -output: mm4/mm5
; Same as COL03 but for columns 4..7; the right edge replicates sample 7.
  movq mm5, [TMP1+%3*16+4*2] ; mm5 = 4|5|6|7
  movq %1, [TMP1+%3*16+3*2]  ; %1 = 3|4|5|6
  movq %2, mm5               ; %2 = 4|5|6|7
  movq mm4, mm5              ; mm4 = 4|5|6|7
  punpckhwd %2, %2           ; %2 = 6|6|7|7
  punpckhwd mm5, %2          ; mm5 = 6|7|7|7  (right edge replication)
  movq %2, %1                ; %2 = 3|4|5|6
  punpcklwd %1, mm4          ; %1 = 3|4|4|5
  punpckhwd %2, mm4          ; %2 = 5|6|6|7
  punpcklwd mm4, %2          ; mm4 = 4|5|5|6
%endmacro
104 : | |||
%macro MIX_ROWS 4 ; %1/%2:prev %3/%4:cur (preserved) mm4/mm5: output
; Vertical 3:1/1:3 mix between the previous and current filtered rows:
; we need to perform: (%1,%3) -> (%1 = 3*%1+%3, mm4 = 3*%3+%1), %3 preserved.
  movq mm4, [Cst3]
  movq mm5, [Cst3]
  pmullw mm4, %3
  pmullw mm5, %4
  paddsw mm4, %1   ; mm4/mm5 = 3*cur + prev
  paddsw mm5, %2
  pmullw %1, [Cst3]
  pmullw %2, [Cst3]
  paddsw %1, %3    ; %1/%2 = 3*prev + cur
  paddsw %2, %4
%endmacro
118 : | |||
119 : | ;=========================================================================== | ||
120 : | ; | ||
121 : | edgomez | 1382 | ; void xvid_Copy_Upsampled_8x8_16To8_mmx(uint8_t *Dst, |
122 : | edgomez | 851 | ; const int16_t *Src, const int BpS); |
123 : | ; | ||
124 : | ;=========================================================================== | ||
125 : | |||
126 : | edgomez | 1382 | ; Note: we can use ">>2" instead of "/4" here, since we |
127 : | edgomez | 851 | ; are (supposed to be) averaging positive values |
128 : | |||
%macro STORE_1 2
; Scale a single (edge) row by >>2, pack 16b->8b and store at [TMP0].
; ">>2" substitutes "/4" here since the values are non-negative by design.
  psraw %1, 2
  psraw %2, 2
  packuswb %1,%2
  movq [TMP0], %1
%endmacro
135 : | |||
%macro STORE_2 2 ; pack and store (%1,%2) + (mm4,mm5)
; Scale two interior rows by >>4 (the combined H+V weights sum to 16),
; pack and store them at [TMP0] and [TMP0+BpS], then advance two rows.
  psraw %1, 4
  psraw %2, 4
  psraw mm4, 4
  psraw mm5, 4
  packuswb %1,%2
  packuswb mm4, mm5
  movq [TMP0], %1
  movq [TMP0+_EAX], mm4
  lea TMP0, [TMP0+2*_EAX]
%endmacro
147 : | |||
148 : | ;////////////////////////////////////////////////////////////////////// | ||
149 : | |||
150 : | Isibaar | 1795 | align SECTION_ALIGN |
151 : | edgomez | 851 | xvid_Copy_Upsampled_8x8_16To8_mmx: ; 344c |
152 : | |||
153 : | Isibaar | 1795 | mov TMP0, prm1 ; Dst |
154 : | mov TMP1, prm2 ; Src | ||
155 : | mov _EAX, prm3 ; BpS | ||
156 : | edgomez | 851 | |
157 : | movq mm6, [Up13] | ||
158 : | movq mm7, [Up31] | ||
159 : | |||
160 : | COL03 mm0, mm1, 0 | ||
161 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
162 : | movq mm4, mm0 | ||
163 : | movq mm5, mm1 | ||
164 : | STORE_1 mm4, mm5 | ||
165 : | Isibaar | 1795 | add TMP0, _EAX |
166 : | edgomez | 851 | |
167 : | COL03 mm2, mm3, 1 | ||
168 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
169 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
170 : | STORE_2 mm0, mm1 | ||
171 : | |||
172 : | COL03 mm0, mm1, 2 | ||
173 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
174 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
175 : | STORE_2 mm2, mm3 | ||
176 : | |||
177 : | COL03 mm2, mm3, 3 | ||
178 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
179 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
180 : | STORE_2 mm0, mm1 | ||
181 : | |||
182 : | COL03 mm0, mm1, 4 | ||
183 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
184 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
185 : | STORE_2 mm2, mm3 | ||
186 : | |||
187 : | COL03 mm2, mm3, 5 | ||
188 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
189 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
190 : | STORE_2 mm0, mm1 | ||
191 : | |||
192 : | COL03 mm0, mm1, 6 | ||
193 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
194 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
195 : | STORE_2 mm2, mm3 | ||
196 : | |||
197 : | COL03 mm2, mm3, 7 | ||
198 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
199 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
200 : | STORE_2 mm0, mm1 | ||
201 : | |||
202 : | STORE_1 mm2, mm3 | ||
203 : | |||
204 : | Isibaar | 1795 | mov TMP0, prm1 |
205 : | add TMP0, 8 | ||
206 : | edgomez | 851 | |
207 : | COL47 mm0, mm1, 0 | ||
208 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
209 : | movq mm4, mm0 | ||
210 : | movq mm5, mm1 | ||
211 : | STORE_1 mm4, mm5 | ||
212 : | Isibaar | 1795 | add TMP0, _EAX |
213 : | edgomez | 851 | |
214 : | COL47 mm2, mm3, 1 | ||
215 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
216 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
217 : | STORE_2 mm0, mm1 | ||
218 : | |||
219 : | COL47 mm0, mm1, 2 | ||
220 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
221 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
222 : | STORE_2 mm2, mm3 | ||
223 : | |||
224 : | COL47 mm2, mm3, 3 | ||
225 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
226 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
227 : | STORE_2 mm0, mm1 | ||
228 : | |||
229 : | COL47 mm0, mm1, 4 | ||
230 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
231 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
232 : | STORE_2 mm2, mm3 | ||
233 : | |||
234 : | COL47 mm2, mm3, 5 | ||
235 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
236 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
237 : | STORE_2 mm0, mm1 | ||
238 : | |||
239 : | COL47 mm0, mm1, 6 | ||
240 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
241 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
242 : | STORE_2 mm2, mm3 | ||
243 : | |||
244 : | COL47 mm2, mm3, 7 | ||
245 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
246 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
247 : | STORE_2 mm0, mm1 | ||
248 : | |||
249 : | STORE_1 mm2, mm3 | ||
250 : | |||
251 : | ret | ||
252 : | Isibaar | 1793 | ENDFUNC |
253 : | edgomez | 851 | |
254 : | ;=========================================================================== | ||
255 : | ; | ||
256 : | edgomez | 1382 | ; void xvid_Add_Upsampled_8x8_16To8_mmx(uint8_t *Dst, |
257 : | edgomez | 851 | ; const int16_t *Src, const int BpS); |
258 : | ; | ||
259 : | ;=========================================================================== | ||
260 : | |||
261 : | ; Note: grrr... the 'pcmpgtw' stuff are the "/4" and "/16" operators | ||
262 : | edgomez | 1382 | ; implemented with ">>2" and ">>4" using: |
263 : | edgomez | 851 | ; x/4 = ( (x-(x<0))>>2 ) + (x<0) |
264 : | ; x/16 = ( (x-(x<0))>>4 ) + (x<0) | ||
265 : | |||
%macro STORE_ADD_1 2
; Signed "/4" of an edge row, added to the destination bytes at [TMP0].
; The 'pcmpgtw' dance implements a真signed division:
;   x/4 = ( (x-(x<0))>>2 ) + (x<0)
; We substract the rounder '2' for corner pixels,
; since when 'x' is negative, (x*4 + 2)/4 is *not*
; equal to 'x'. In fact, the correct relation is:
;    (x*4 + 2)/4 = x - (x<0)
; So, better revert to (x*4)/4 = x.
; Trashes mm6/mm7.

  psubsw %1, [Cst2000]  ; remove rounder from first (corner) word
  psubsw %2, [Cst0002]  ; remove rounder from last (corner) word
  pxor mm6, mm6
  pxor mm7, mm7
  pcmpgtw mm6, %1       ; mm6/mm7 = (x<0) ? -1 : 0, per word
  pcmpgtw mm7, %2
  paddsw %1, mm6
  paddsw %2, mm7
  psraw %1, 2
  psraw %2, 2
  psubsw %1, mm6
  psubsw %2, mm7

  ; mix with destination [TMP0]
  movq mm6, [TMP0]
  movq mm7, [TMP0]
  punpcklbw mm6, [Cst0] ; widen dst bytes to words
  punpckhbw mm7, [Cst0]
  paddsw %1, mm6
  paddsw %2, mm7
  packuswb %1,%2
  movq [TMP0], %1
%endmacro
296 : | |||
%macro STORE_ADD_2 2
; Signed "/16" of two interior rows ((%1,%2) and (mm4,mm5)) added to the
; destination bytes at [TMP0] and [TMP0+BpS], using
;   x/16 = ( (x-(x<0))>>4 ) + (x<0)
; Advances TMP0 by two rows.  Trashes mm6/mm7.
  pxor mm6, mm6
  pxor mm7, mm7
  pcmpgtw mm6, %1
  pcmpgtw mm7, %2
  paddsw %1, mm6
  paddsw %2, mm7
  psraw %1, 4
  psraw %2, 4
  psubsw %1, mm6
  psubsw %2, mm7

  pxor mm6, mm6
  pxor mm7, mm7
  pcmpgtw mm6, mm4
  pcmpgtw mm7, mm5
  paddsw mm4, mm6
  paddsw mm5, mm7
  psraw mm4, 4
  psraw mm5, 4
  psubsw mm4, mm6
  psubsw mm5, mm7

  ; mix with destination
  movq mm6, [TMP0]
  movq mm7, [TMP0]
  punpcklbw mm6, [Cst0]
  punpckhbw mm7, [Cst0]
  paddsw %1, mm6
  paddsw %2, mm7

  movq mm6, [TMP0+_EAX]
  movq mm7, [TMP0+_EAX]

  punpcklbw mm6, [Cst0]
  punpckhbw mm7, [Cst0]
  paddsw mm4, mm6
  paddsw mm5, mm7

  packuswb %1,%2
  packuswb mm4, mm5

  movq [TMP0], %1
  movq [TMP0+_EAX], mm4

  lea TMP0, [TMP0+2*_EAX]
%endmacro
344 : | |||
345 : | ;////////////////////////////////////////////////////////////////////// | ||
346 : | |||
347 : | Isibaar | 1795 | align SECTION_ALIGN |
348 : | edgomez | 851 | xvid_Add_Upsampled_8x8_16To8_mmx: ; 579c |
349 : | |||
350 : | Isibaar | 1795 | mov TMP0, prm1 ; Dst |
351 : | mov TMP1, prm2 ; Src | ||
352 : | mov _EAX, prm3 ; BpS | ||
353 : | edgomez | 851 | |
354 : | COL03 mm0, mm1, 0 | ||
355 : | edgomez | 1382 | MUL_PACK mm0,mm1, [Up13], [Up31] |
356 : | edgomez | 851 | movq mm4, mm0 |
357 : | movq mm5, mm1 | ||
358 : | STORE_ADD_1 mm4, mm5 | ||
359 : | Isibaar | 1795 | add TMP0, _EAX |
360 : | edgomez | 851 | |
361 : | COL03 mm2, mm3, 1 | ||
362 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
363 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
364 : | STORE_ADD_2 mm0, mm1 | ||
365 : | |||
366 : | COL03 mm0, mm1, 2 | ||
367 : | MUL_PACK mm0,mm1, [Up13], [Up31] | ||
368 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
369 : | STORE_ADD_2 mm2, mm3 | ||
370 : | |||
371 : | COL03 mm2, mm3, 3 | ||
372 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
373 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
374 : | STORE_ADD_2 mm0, mm1 | ||
375 : | |||
376 : | COL03 mm0, mm1, 4 | ||
377 : | MUL_PACK mm0,mm1, [Up13], [Up31] | ||
378 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
379 : | STORE_ADD_2 mm2, mm3 | ||
380 : | |||
381 : | COL03 mm2, mm3, 5 | ||
382 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
383 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
384 : | STORE_ADD_2 mm0, mm1 | ||
385 : | |||
386 : | COL03 mm0, mm1, 6 | ||
387 : | MUL_PACK mm0,mm1, [Up13], [Up31] | ||
388 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
389 : | STORE_ADD_2 mm2, mm3 | ||
390 : | |||
391 : | COL03 mm2, mm3, 7 | ||
392 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
393 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
394 : | STORE_ADD_2 mm0, mm1 | ||
395 : | |||
396 : | STORE_ADD_1 mm2, mm3 | ||
397 : | |||
398 : | |||
399 : | Isibaar | 1795 | mov TMP0, prm1 |
400 : | add TMP0, 8 | ||
401 : | edgomez | 851 | |
402 : | COL47 mm0, mm1, 0 | ||
403 : | edgomez | 1382 | MUL_PACK mm0,mm1, [Up13], [Up31] |
404 : | edgomez | 851 | movq mm4, mm0 |
405 : | edgomez | 1382 | movq mm5, mm1 |
406 : | edgomez | 851 | STORE_ADD_1 mm4, mm5 |
407 : | Isibaar | 1795 | add TMP0, _EAX |
408 : | edgomez | 851 | |
409 : | COL47 mm2, mm3, 1 | ||
410 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
411 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
412 : | STORE_ADD_2 mm0, mm1 | ||
413 : | |||
414 : | COL47 mm0, mm1, 2 | ||
415 : | MUL_PACK mm0,mm1, [Up13], [Up31] | ||
416 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
417 : | STORE_ADD_2 mm2, mm3 | ||
418 : | |||
419 : | COL47 mm2, mm3, 3 | ||
420 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
421 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
422 : | STORE_ADD_2 mm0, mm1 | ||
423 : | |||
424 : | COL47 mm0, mm1, 4 | ||
425 : | MUL_PACK mm0,mm1, [Up13], [Up31] | ||
426 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
427 : | STORE_ADD_2 mm2, mm3 | ||
428 : | |||
429 : | COL47 mm2, mm3, 5 | ||
430 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
431 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
432 : | STORE_ADD_2 mm0, mm1 | ||
433 : | |||
434 : | COL47 mm0, mm1, 6 | ||
435 : | MUL_PACK mm0,mm1, [Up13], [Up31] | ||
436 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
437 : | STORE_ADD_2 mm2, mm3 | ||
438 : | |||
439 : | COL47 mm2, mm3, 7 | ||
440 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
441 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
442 : | STORE_ADD_2 mm0, mm1 | ||
443 : | |||
444 : | STORE_ADD_1 mm2, mm3 | ||
445 : | |||
446 : | ret | ||
447 : | Isibaar | 1793 | ENDFUNC |
448 : | edgomez | 851 | |
449 : | ;=========================================================================== | ||
450 : | ; | ||
451 : | edgomez | 1382 | ; void xvid_Copy_Upsampled_8x8_16To8_xmm(uint8_t *Dst, |
452 : | edgomez | 851 | ; const int16_t *Src, const int BpS); |
453 : | ; | ||
454 : | ;=========================================================================== | ||
455 : | |||
456 : | ; xmm version can take (little) advantage of 'pshufw' | ||
457 : | |||
%macro COL03_SSE 3 ;%1/%2: regs, %3: row -trashes mm4/mm5
; SSE-integer version of COL03: 'pshufw' replaces the unpack sequence.
  movq %2, [TMP1+%3*16+0*2]                        ; <- 0|1|2|3
  pshufw %1, %2, (0+0*4+0*16+1*64)                 ; %1 = 0|0|0|1
  pshufw mm4, %2, (0+1*4+1*16+2*64)                ; mm4= 0|1|1|2
  pshufw %2, %2, (1+2*4+2*16+3*64)                 ; %2 = 1|2|2|3
  pshufw mm5, [TMP1+%3*16+2*2], (0+1*4+1*16+2*64)  ; mm5 = 2|3|3|4
%endmacro
465 : | |||
%macro COL47_SSE 3 ;%1-%2: regs, %3: row -trashes mm4/mm5
; SSE-integer version of COL47 (right half, edge-replicated at sample 7).
  pshufw %1, [TMP1+%3*16+2*2], (1+2*4+2*16+3*64)   ; 3|4|4|5
  movq mm5, [TMP1+%3*16+2*4]                       ; <- 4|5|6|7
  pshufw mm4, mm5, (0+1*4+1*16+2*64)               ; 4|5|5|6
  pshufw %2, mm5, (1+2*4+2*16+3*64)                ; 5|6|6|7
  pshufw mm5, mm5, (2+3*4+3*16+3*64)               ; 6|7|7|7
%endmacro
473 : | |||
474 : | |||
475 : | ;////////////////////////////////////////////////////////////////////// | ||
476 : | |||
477 : | Isibaar | 1795 | align SECTION_ALIGN |
478 : | edgomez | 851 | xvid_Copy_Upsampled_8x8_16To8_xmm: ; 315c |
479 : | |||
480 : | Isibaar | 1795 | mov TMP0, prm1 ; Dst |
481 : | mov TMP1, prm2 ; Src | ||
482 : | mov _EAX, prm3 ; BpS | ||
483 : | edgomez | 851 | |
484 : | movq mm6, [Up13] | ||
485 : | movq mm7, [Up31] | ||
486 : | |||
487 : | COL03_SSE mm0, mm1, 0 | ||
488 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
489 : | movq mm4, mm0 | ||
490 : | movq mm5, mm1 | ||
491 : | STORE_1 mm4, mm5 | ||
492 : | Isibaar | 1795 | add TMP0, _EAX |
493 : | edgomez | 851 | |
494 : | COL03_SSE mm2, mm3, 1 | ||
495 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
496 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
497 : | STORE_2 mm0, mm1 | ||
498 : | |||
499 : | COL03_SSE mm0, mm1, 2 | ||
500 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
501 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
502 : | STORE_2 mm2, mm3 | ||
503 : | |||
504 : | COL03_SSE mm2, mm3, 3 | ||
505 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
506 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
507 : | STORE_2 mm0, mm1 | ||
508 : | |||
509 : | COL03_SSE mm0, mm1, 4 | ||
510 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
511 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
512 : | STORE_2 mm2, mm3 | ||
513 : | |||
514 : | COL03_SSE mm2, mm3, 5 | ||
515 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
516 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
517 : | STORE_2 mm0, mm1 | ||
518 : | |||
519 : | COL03_SSE mm0, mm1, 6 | ||
520 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
521 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
522 : | STORE_2 mm2, mm3 | ||
523 : | |||
524 : | COL03_SSE mm2, mm3, 7 | ||
525 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
526 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
527 : | STORE_2 mm0, mm1 | ||
528 : | |||
529 : | STORE_1 mm2, mm3 | ||
530 : | |||
531 : | Isibaar | 1795 | mov TMP0, prm1 |
532 : | add TMP0, 8 | ||
533 : | edgomez | 851 | |
534 : | COL47_SSE mm0, mm1, 0 | ||
535 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
536 : | movq mm4, mm0 | ||
537 : | movq mm5, mm1 | ||
538 : | STORE_1 mm4, mm5 | ||
539 : | Isibaar | 1795 | add TMP0, _EAX |
540 : | edgomez | 851 | |
541 : | COL47_SSE mm2, mm3, 1 | ||
542 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
543 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
544 : | STORE_2 mm0, mm1 | ||
545 : | |||
546 : | COL47_SSE mm0, mm1, 2 | ||
547 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
548 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
549 : | STORE_2 mm2, mm3 | ||
550 : | |||
551 : | COL47_SSE mm2, mm3, 3 | ||
552 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
553 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
554 : | STORE_2 mm0, mm1 | ||
555 : | |||
556 : | COL47_SSE mm0, mm1, 4 | ||
557 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
558 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
559 : | STORE_2 mm2, mm3 | ||
560 : | |||
561 : | COL47_SSE mm2, mm3, 5 | ||
562 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
563 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
564 : | STORE_2 mm0, mm1 | ||
565 : | |||
566 : | COL47_SSE mm0, mm1, 6 | ||
567 : | MUL_PACK mm0,mm1, mm6, mm7 | ||
568 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
569 : | STORE_2 mm2, mm3 | ||
570 : | |||
571 : | COL47_SSE mm2, mm3, 7 | ||
572 : | MUL_PACK mm2,mm3, mm6, mm7 | ||
573 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
574 : | STORE_2 mm0, mm1 | ||
575 : | |||
576 : | STORE_1 mm2, mm3 | ||
577 : | |||
578 : | ret | ||
579 : | Isibaar | 1793 | ENDFUNC |
580 : | edgomez | 851 | |
581 : | ;=========================================================================== | ||
582 : | ; | ||
583 : | edgomez | 1382 | ; void xvid_Add_Upsampled_8x8_16To8_xmm(uint8_t *Dst, |
584 : | edgomez | 851 | ; const int16_t *Src, const int BpS); |
585 : | ; | ||
586 : | ;=========================================================================== | ||
587 : | |||
588 : | Isibaar | 1795 | align SECTION_ALIGN |
589 : | edgomez | 851 | xvid_Add_Upsampled_8x8_16To8_xmm: ; 549c |
590 : | |||
591 : | Isibaar | 1795 | mov TMP0, prm1 ; Dst |
592 : | mov TMP1, prm2 ; Src | ||
593 : | mov _EAX, prm3 ; BpS | ||
594 : | edgomez | 851 | |
595 : | COL03_SSE mm0, mm1, 0 | ||
596 : | edgomez | 1382 | MUL_PACK mm0,mm1, [Up13], [Up31] |
597 : | edgomez | 851 | movq mm4, mm0 |
598 : | movq mm5, mm1 | ||
599 : | STORE_ADD_1 mm4, mm5 | ||
600 : | Isibaar | 1795 | add TMP0, _EAX |
601 : | edgomez | 851 | |
602 : | COL03_SSE mm2, mm3, 1 | ||
603 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
604 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
605 : | STORE_ADD_2 mm0, mm1 | ||
606 : | |||
607 : | COL03_SSE mm0, mm1, 2 | ||
608 : | MUL_PACK mm0,mm1, [Up13], [Up31] | ||
609 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
610 : | STORE_ADD_2 mm2, mm3 | ||
611 : | |||
612 : | COL03_SSE mm2, mm3, 3 | ||
613 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
614 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
615 : | STORE_ADD_2 mm0, mm1 | ||
616 : | |||
617 : | COL03_SSE mm0, mm1, 4 | ||
618 : | MUL_PACK mm0,mm1, [Up13], [Up31] | ||
619 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
620 : | STORE_ADD_2 mm2, mm3 | ||
621 : | |||
622 : | COL03_SSE mm2, mm3, 5 | ||
623 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
624 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
625 : | STORE_ADD_2 mm0, mm1 | ||
626 : | |||
627 : | COL03_SSE mm0, mm1, 6 | ||
628 : | MUL_PACK mm0,mm1, [Up13], [Up31] | ||
629 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
630 : | STORE_ADD_2 mm2, mm3 | ||
631 : | |||
632 : | COL03_SSE mm2, mm3, 7 | ||
633 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
634 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
635 : | STORE_ADD_2 mm0, mm1 | ||
636 : | |||
637 : | STORE_ADD_1 mm2, mm3 | ||
638 : | |||
639 : | |||
640 : | Isibaar | 1795 | mov TMP0, prm1 |
641 : | add TMP0, 8 | ||
642 : | edgomez | 851 | |
643 : | COL47_SSE mm0, mm1, 0 | ||
644 : | edgomez | 1382 | MUL_PACK mm0,mm1, [Up13], [Up31] |
645 : | edgomez | 851 | movq mm4, mm0 |
646 : | edgomez | 1382 | movq mm5, mm1 |
647 : | edgomez | 851 | STORE_ADD_1 mm4, mm5 |
648 : | Isibaar | 1795 | add TMP0, _EAX |
649 : | edgomez | 851 | |
650 : | COL47_SSE mm2, mm3, 1 | ||
651 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
652 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
653 : | STORE_ADD_2 mm0, mm1 | ||
654 : | |||
655 : | COL47_SSE mm0, mm1, 2 | ||
656 : | MUL_PACK mm0,mm1, [Up13], [Up31] | ||
657 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
658 : | STORE_ADD_2 mm2, mm3 | ||
659 : | |||
660 : | COL47_SSE mm2, mm3, 3 | ||
661 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
662 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
663 : | STORE_ADD_2 mm0, mm1 | ||
664 : | |||
665 : | COL47_SSE mm0, mm1, 4 | ||
666 : | MUL_PACK mm0,mm1, [Up13], [Up31] | ||
667 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
668 : | STORE_ADD_2 mm2, mm3 | ||
669 : | |||
670 : | COL47_SSE mm2, mm3, 5 | ||
671 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
672 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
673 : | STORE_ADD_2 mm0, mm1 | ||
674 : | |||
675 : | COL47_SSE mm0, mm1, 6 | ||
676 : | MUL_PACK mm0,mm1, [Up13], [Up31] | ||
677 : | MIX_ROWS mm2, mm3, mm0, mm1 | ||
678 : | STORE_ADD_2 mm2, mm3 | ||
679 : | |||
680 : | COL47_SSE mm2, mm3, 7 | ||
681 : | MUL_PACK mm2,mm3, [Up13], [Up31] | ||
682 : | MIX_ROWS mm0, mm1, mm2, mm3 | ||
683 : | STORE_ADD_2 mm0, mm1 | ||
684 : | |||
685 : | STORE_ADD_1 mm2, mm3 | ||
686 : | |||
687 : | ret | ||
688 : | Isibaar | 1793 | ENDFUNC |
689 : | edgomez | 851 | |
690 : | |||
691 : | ;=========================================================================== | ||
692 : | ; | ||
693 : | ; void xvid_HFilter_31_mmx(uint8_t *Src1, uint8_t *Src2, int Nb_Blks); | ||
694 : | ; void xvid_VFilter_31_x86(uint8_t *Src1, uint8_t *Src2, const int BpS, int Nb_Blks); | ||
695 : | ; void xvid_HFilter_31_x86(uint8_t *Src1, uint8_t *Src2, int Nb_Blks); | ||
696 : | ; | ||
697 : | ;=========================================================================== | ||
698 : | |||
699 : | ;////////////////////////////////////////////////////////////////////// | ||
700 : | ;// horizontal/vertical filtering: [x,y] -> [ (3x+y+2)>>2, (x+3y+2)>>2 ] | ||
701 : | ;// | ||
702 : | ;// We use the trick: tmp = (x+y+2) -> [x = (tmp+2x)>>2, y = (tmp+2y)>>2] | ||
703 : | ;////////////////////////////////////////////////////////////////////// | ||
704 : | |||
705 : | Isibaar | 1795 | align SECTION_ALIGN |
706 : | edgomez | 851 | xvid_HFilter_31_mmx: |
707 : | Isibaar | 1795 | |
708 : | mov TMP0, prm1 ; Src1 | ||
709 : | mov TMP1, prm2 ; Src2 | ||
710 : | mov _EAX, prm3 ; Nb_Blks | ||
711 : | lea _EAX, [_EAX*2] | ||
712 : | edgomez | 851 | movq mm5, [Cst2] |
713 : | pxor mm7, mm7 | ||
714 : | |||
715 : | Isibaar | 1795 | lea TMP0, [TMP0+_EAX*4] |
716 : | lea TMP1, [TMP1+_EAX*4] | ||
717 : | edgomez | 851 | |
718 : | Isibaar | 1795 | neg _EAX |
719 : | edgomez | 851 | |
720 : | .Loop: ;12c | ||
721 : | Isibaar | 1795 | movd mm0, [TMP0+_EAX*4] |
722 : | movd mm1, [TMP1+_EAX*4] | ||
723 : | edgomez | 1382 | movq mm2, mm5 |
724 : | edgomez | 851 | punpcklbw mm0, mm7 |
725 : | punpcklbw mm1, mm7 | ||
726 : | paddsw mm2, mm0 | ||
727 : | paddsw mm0, mm0 | ||
728 : | paddsw mm2, mm1 | ||
729 : | paddsw mm1, mm1 | ||
730 : | paddsw mm0, mm2 | ||
731 : | paddsw mm1, mm2 | ||
732 : | psraw mm0, 2 | ||
733 : | psraw mm1, 2 | ||
734 : | packuswb mm0, mm7 | ||
735 : | packuswb mm1, mm7 | ||
736 : | Isibaar | 1795 | movd [TMP0+_EAX*4], mm0 |
737 : | movd [TMP1+_EAX*4], mm1 | ||
738 : | Isibaar | 1834 | inc _EAX |
739 : | edgomez | 851 | jl .Loop |
740 : | |||
741 : | ret | ||
742 : | Isibaar | 1793 | ENDFUNC |
743 : | edgomez | 851 | |
744 : | ; mmx is of no use here. Better use plain ASM. Moreover, | ||
745 : | ; this is for the fun of ASM coding, coz' every modern compiler can | ||
746 : | ; end up with a code that looks very much like this one... | ||
747 : | |||
748 : | Isibaar | 1795 | align SECTION_ALIGN |
749 : | edgomez | 851 | xvid_VFilter_31_x86: |
750 : | Isibaar | 1795 | mov TMP0, prm1 ; Src1 |
751 : | mov TMP1, prm2 ; Src2 | ||
752 : | mov _EAX, prm4 ; Nb_Blks | ||
753 : | lea _EAX, [_EAX*8] | ||
754 : | edgomez | 851 | |
755 : | Isibaar | 1795 | push _ESI |
756 : | push _EDI | ||
757 : | push _EBX | ||
758 : | push _EBP | ||
759 : | |||
760 : | %ifdef ARCH_IS_X86_64 | ||
761 : | mov _EBP, prm3 | ||
762 : | %else | ||
763 : | mov _EBP, [_ESP+12 +16] ; BpS | ||
764 : | %endif | ||
765 : | |||
766 : | edgomez | 851 | .Loop: ;7c |
767 : | Isibaar | 1795 | movzx _ESI, byte [TMP0] |
768 : | movzx _EDI, byte [TMP1] | ||
769 : | edgomez | 851 | |
770 : | Isibaar | 1795 | lea _EBX,[_ESI+_EDI+2] |
771 : | lea _ESI,[_EBX+2*_ESI] | ||
772 : | lea _EDI,[_EBX+2*_EDI] | ||
773 : | edgomez | 851 | |
774 : | Isibaar | 1795 | shr _ESI,2 |
775 : | shr _EDI,2 | ||
776 : | mov [TMP0], cl | ||
777 : | mov [TMP1], dl | ||
778 : | lea TMP0, [TMP0+_EBP] | ||
779 : | lea TMP1, [TMP1+_EBP] | ||
780 : | dec _EAX | ||
781 : | edgomez | 851 | jg .Loop |
782 : | |||
783 : | Isibaar | 1795 | pop _EBP |
784 : | pop _EBX | ||
785 : | pop _EDI | ||
786 : | pop _ESI | ||
787 : | edgomez | 851 | ret |
788 : | Isibaar | 1793 | ENDFUNC |
789 : | edgomez | 851 | |
790 : | ; this one's just a little faster than gcc's code. Very little. | ||
791 : | |||
792 : | Isibaar | 1795 | align SECTION_ALIGN |
793 : | edgomez | 851 | xvid_HFilter_31_x86: |
794 : | |||
795 : | Isibaar | 1795 | mov TMP0, prm1 ; Src1 |
796 : | mov TMP1, prm2 ; Src2 | ||
797 : | mov _EAX, prm3 ; Nb_Blks | ||
798 : | edgomez | 851 | |
799 : | Isibaar | 1795 | lea _EAX,[_EAX*8] |
800 : | lea TMP0, [TMP0+_EAX] | ||
801 : | lea TMP1, [TMP0+_EAX] | ||
802 : | neg _EAX | ||
803 : | |||
804 : | push _ESI | ||
805 : | push _EDI | ||
806 : | push _EBX | ||
807 : | |||
808 : | edgomez | 851 | .Loop: ; 6c |
809 : | Isibaar | 1795 | movzx _ESI, byte [TMP0+_EAX] |
810 : | movzx _EDI, byte [TMP1+_EAX] | ||
811 : | edgomez | 851 | |
812 : | Isibaar | 1795 | lea _EBX, [_ESI+_EDI+2] |
813 : | lea _ESI,[_EBX+2*_ESI] | ||
814 : | lea _EDI,[_EBX+2*_EDI] | ||
815 : | shr _ESI,2 | ||
816 : | shr _EDI,2 | ||
817 : | mov [TMP0+_EAX], cl | ||
818 : | mov [TMP1+_EAX], dl | ||
819 : | inc _EAX | ||
820 : | edgomez | 851 | |
821 : | jl .Loop | ||
822 : | |||
823 : | Isibaar | 1795 | pop _EBX |
824 : | pop _EDI | ||
825 : | pop _ESI | ||
826 : | edgomez | 851 | ret |
827 : | Isibaar | 1793 | ENDFUNC |
828 : | edgomez | 851 | |
829 : | ;////////////////////////////////////////////////////////////////////// | ||
830 : | ;// 16b downsampling 16x16 -> 8x8 | ||
831 : | ;////////////////////////////////////////////////////////////////////// | ||
832 : | |||
%macro HFILTER_1331 2 ;%1:src %2:dst reg. -trashes mm0/mm1/mm2
; Horizontal [1,3,3,1] filter producing 4 words in %2 from 8+ input bytes:
; even/odd bytes are separated with Mask_ff, the two middle taps are
; weighted by mm7 (=Cst3, preloaded by the caller).
  movq mm2, [Mask_ff]
  movq %2, [%1-1]  ;-10123456
  movq mm0, [%1]   ; 01234567
  movq mm1, [%1+1] ; 12345678
  pand %2, mm2     ;-1|1|3|5
  pand mm0, mm2    ; 0|2|4|6
  pand mm1, mm2    ; 1|3|5|7
  pand mm2, [%1+2] ; 2|4|6|8
  paddusw mm0, mm1 ; middle pair
  paddusw %2, mm2  ; outer pair
  pmullw mm0, mm7  ; x3
  paddusw %2, mm0
%endmacro
847 : | |||
%macro VFILTER_1331 4 ; %1-4: regs %1-%2: trashed
; Vertical [1,3,3,1] filter over four H-filtered rows %1..%4, with '+32'
; rounding and >>6 normalization; result left in %1.
  paddsw %1, [Cst32]
  paddsw %2, %3
  pmullw %2, mm7   ; 3*(middle rows), mm7 = Cst3
  paddsw %1,%4
  paddsw %1, %2
  psraw %1, 6
%endmacro
856 : | |||
857 : | ;=========================================================================== | ||
858 : | ; | ||
859 : | ; void xvid_Filter_18x18_To_8x8_mmx(int16_t *Dst, | ||
860 : | ; const uint8_t *Src, const int BpS); | ||
861 : | ; | ||
862 : | ;=========================================================================== | ||
863 : | |||
%macro COPY_TWO_LINES_1331 1 ; %1: dst
; Produce two filtered output rows (16 bytes apart in Dst) while sliding
; the mm3..mm6 4-row delay line down by four source rows.
  HFILTER_1331 TMP1     , mm5
  HFILTER_1331 TMP1+_EAX, mm6
  lea TMP1, [TMP1+2*_EAX]
  VFILTER_1331 mm3,mm4,mm5, mm6
  movq [%1], mm3

  HFILTER_1331 TMP1     , mm3
  HFILTER_1331 TMP1+_EAX, mm4
  lea TMP1, [TMP1+2*_EAX]
  VFILTER_1331 mm5,mm6,mm3,mm4
  movq [%1+16], mm5
%endmacro
877 : | |||
878 : | Isibaar | 1795 | align SECTION_ALIGN |
879 : | edgomez | 851 | xvid_Filter_18x18_To_8x8_mmx: ; 283c (~4.4c per output pixel) |
880 : | |||
881 : | Isibaar | 1795 | mov TMP0, prm1 ; Dst |
882 : | mov TMP1, prm2 ; Src | ||
883 : | mov _EAX, prm3 ; BpS | ||
884 : | edgomez | 851 | |
885 : | movq mm7, [Cst3] | ||
886 : | Isibaar | 1795 | sub TMP1, _EAX |
887 : | edgomez | 851 | |
888 : | ; mm3/mm4/mm5/mm6 is used as a 4-samples delay line. | ||
889 : | |||
890 : | ; process columns 0-3 | ||
891 : | |||
892 : | Isibaar | 1795 | HFILTER_1331 TMP1 , mm3 ; pre-load mm3/mm4 |
893 : | HFILTER_1331 TMP1+_EAX, mm4 | ||
894 : | lea TMP1, [TMP1+2*_EAX] | ||
895 : | edgomez | 851 | |
896 : | Isibaar | 1795 | COPY_TWO_LINES_1331 TMP0 + 0*16 |
897 : | COPY_TWO_LINES_1331 TMP0 + 2*16 | ||
898 : | COPY_TWO_LINES_1331 TMP0 + 4*16 | ||
899 : | COPY_TWO_LINES_1331 TMP0 + 6*16 | ||
900 : | edgomez | 851 | |
901 : | ; process columns 4-7 | ||
902 : | |||
903 : | Isibaar | 1795 | mov TMP1, prm2 |
904 : | sub TMP1, _EAX | ||
905 : | add TMP1, 8 | ||
906 : | edgomez | 851 | |
907 : | Isibaar | 1795 | HFILTER_1331 TMP1 , mm3 ; pre-load mm3/mm4 |
908 : | HFILTER_1331 TMP1+_EAX, mm4 | ||
909 : | lea TMP1, [TMP1+2*_EAX] | ||
910 : | edgomez | 851 | |
911 : | Isibaar | 1795 | COPY_TWO_LINES_1331 TMP0 + 0*16 +8 |
912 : | COPY_TWO_LINES_1331 TMP0 + 2*16 +8 | ||
913 : | COPY_TWO_LINES_1331 TMP0 + 4*16 +8 | ||
914 : | COPY_TWO_LINES_1331 TMP0 + 6*16 +8 | ||
915 : | edgomez | 851 | |
916 : | ret | ||
917 : | Isibaar | 1793 | ENDFUNC |
918 : | edgomez | 851 | |
919 : | ;=========================================================================== | ||
920 : | ; | ||
921 : | ; void xvid_Filter_Diff_18x18_To_8x8_mmx(int16_t *Dst, | ||
922 : | ; const uint8_t *Src, const int BpS); | ||
923 : | ; | ||
924 : | ;=========================================================================== | ||
925 : | |||
%macro DIFF_TWO_LINES_1331 1 ; %1: dst
; Like COPY_TWO_LINES_1331 but stores Dst - filtered (prediction residue)
; instead of the filtered value itself.
  HFILTER_1331 TMP1     , mm5
  HFILTER_1331 TMP1+_EAX, mm6
  lea TMP1, [TMP1+2*_EAX]
  movq mm2, [%1]
  VFILTER_1331 mm3,mm4,mm5, mm6
  psubsw mm2, mm3
  movq [%1], mm2

  HFILTER_1331 TMP1     , mm3
  HFILTER_1331 TMP1+_EAX, mm4
  lea TMP1, [TMP1+2*_EAX]
  movq mm2, [%1+16]
  VFILTER_1331 mm5,mm6,mm3,mm4
  psubsw mm2, mm5
  movq [%1+16], mm2
%endmacro
943 : | |||
944 : | Isibaar | 1795 | align SECTION_ALIGN |
945 : | edgomez | 851 | xvid_Filter_Diff_18x18_To_8x8_mmx: ; 302c |
946 : | |||
947 : | Isibaar | 1795 | mov TMP0, prm1 ; Dst |
948 : | mov TMP1, prm2 ; Src | ||
949 : | mov _EAX, prm3 ; BpS | ||
950 : | edgomez | 851 | |
951 : | movq mm7, [Cst3] | ||
952 : | Isibaar | 1795 | sub TMP1, _EAX |
953 : | edgomez | 851 | |
954 : | ; mm3/mm4/mm5/mm6 is used as a 4-samples delay line. | ||
955 : | |||
956 : | ; process columns 0-3 | ||
957 : | |||
958 : | Isibaar | 1795 | HFILTER_1331 TMP1 , mm3 ; pre-load mm3/mm4 |
959 : | HFILTER_1331 TMP1+_EAX, mm4 | ||
960 : | lea TMP1, [TMP1+2*_EAX] | ||
961 : | edgomez | 851 | |
962 : | Isibaar | 1795 | DIFF_TWO_LINES_1331 TMP0 + 0*16 |
963 : | DIFF_TWO_LINES_1331 TMP0 + 2*16 | ||
964 : | DIFF_TWO_LINES_1331 TMP0 + 4*16 | ||
965 : | DIFF_TWO_LINES_1331 TMP0 + 6*16 | ||
966 : | edgomez | 851 | |
967 : | ; process columns 4-7 | ||
968 : | Isibaar | 1795 | mov TMP1, prm2 |
969 : | sub TMP1, _EAX | ||
970 : | add TMP1, 8 | ||
971 : | edgomez | 851 | |
972 : | Isibaar | 1795 | HFILTER_1331 TMP1 , mm3 ; pre-load mm3/mm4 |
973 : | HFILTER_1331 TMP1+_EAX, mm4 | ||
974 : | lea TMP1, [TMP1+2*_EAX] | ||
975 : | edgomez | 851 | |
976 : | Isibaar | 1795 | DIFF_TWO_LINES_1331 TMP0 + 0*16 +8 |
977 : | DIFF_TWO_LINES_1331 TMP0 + 2*16 +8 | ||
978 : | DIFF_TWO_LINES_1331 TMP0 + 4*16 +8 | ||
979 : | DIFF_TWO_LINES_1331 TMP0 + 6*16 +8 | ||
980 : | edgomez | 851 | |
981 : | ret | ||
982 : | Isibaar | 1793 | ENDFUNC |
983 : | edgomez | 851 | |
984 : | ;////////////////////////////////////////////////////////////////////// | ||
985 : | |||
986 : | ; pfeewwww... Never Do That On Stage Again. :) | ||
987 : | |||
988 : | Isibaar | 1877 | NON_EXEC_STACK |
No admin address has been configured | ViewVC Help |
Powered by ViewVC 1.0.4 |