Parent Directory | Revision Log
Revision 651 - (view) (download)
1 : | chl | 434 | ;/***************************************************************************** |
2 : | Isibaar | 3 | ; * |
3 : | chl | 434 | ; * XVID MPEG-4 VIDEO CODEC |
; *  mmx rgb24/rgb32 to yv12 (planar yuv 4:2:0) conversion
5 : | Isibaar | 3 | ; * |
6 : | chl | 434 | ; * Copyright(C) 2002 Peter Ross <pross@xvid.org> |
7 : | Isibaar | 3 | ; * |
8 : | edgomez | 651 | ; * This file is part of XviD, a free MPEG-4 video encoder/decoder |
9 : | Isibaar | 3 | ; * |
10 : | edgomez | 651 | ; * XviD is free software; you can redistribute it and/or modify it |
11 : | ; * under the terms of the GNU General Public License as published by | ||
12 : | chl | 434 | ; * the Free Software Foundation; either version 2 of the License, or |
13 : | ; * (at your option) any later version. | ||
14 : | Isibaar | 3 | ; * |
15 : | chl | 434 | ; * This program is distributed in the hope that it will be useful, |
16 : | ; * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 : | ; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 : | ; * GNU General Public License for more details. | ||
19 : | Isibaar | 3 | ; * |
20 : | chl | 434 | ; * You should have received a copy of the GNU General Public License |
21 : | ; * along with this program; if not, write to the Free Software | ||
22 : | ; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
23 : | Isibaar | 3 | ; * |
24 : | edgomez | 651 | ; * Under section 8 of the GNU General Public License, the copyright |
25 : | ; * holders of XVID explicitly forbid distribution in the following | ||
26 : | ; * countries: | ||
27 : | ; * | ||
28 : | ; * - Japan | ||
29 : | ; * - United States of America | ||
30 : | ; * | ||
31 : | ; * Linking XviD statically or dynamically with other modules is making a | ||
32 : | ; * combined work based on XviD. Thus, the terms and conditions of the | ||
33 : | ; * GNU General Public License cover the whole combination. | ||
34 : | ; * | ||
35 : | ; * As a special exception, the copyright holders of XviD give you | ||
36 : | ; * permission to link XviD with independent modules that communicate with | ||
37 : | ; * XviD solely through the VFW1.1 and DShow interfaces, regardless of the | ||
38 : | ; * license terms of these independent modules, and to copy and distribute | ||
39 : | ; * the resulting combined work under terms of your choice, provided that | ||
40 : | ; * every copy of the combined work is accompanied by a complete copy of | ||
41 : | ; * the source code of XviD (the version of XviD used to produce the | ||
42 : | ; * combined work), being distributed under the terms of the GNU General | ||
43 : | ; * Public License plus this exception. An independent module is a module | ||
44 : | ; * which is not derived from or based on XviD. | ||
45 : | ; * | ||
46 : | ; * Note that people who make modified versions of XviD are not obligated | ||
47 : | ; * to grant this special exception for their modified versions; it is | ||
48 : | ; * their choice whether to do so. The GNU General Public License gives | ||
49 : | ; * permission to release a modified version without this exception; this | ||
50 : | ; * exception also makes it possible to release a modified version which | ||
51 : | ; * carries forward this exception. | ||
52 : | ; * | ||
53 : | ; * $Id: rgb_to_yv12_mmx.asm,v 1.3 2002-11-17 00:20:30 edgomez Exp $ | ||
54 : | ; * | ||
55 : | chl | 434 | ; ****************************************************************************/ |
56 : | Isibaar | 3 | |
57 : | bits 32 | ||
58 : | |||
59 : | |||
60 : | section .data | ||
61 : | |||
62 : | %macro cglobal 1 | ||
63 : | %ifdef PREFIX | ||
64 : | global _%1 | ||
65 : | %define %1 _%1 | ||
66 : | %else | ||
67 : | global %1 | ||
68 : | %endif | ||
69 : | %endmacro | ||
70 : | |||
71 : | align 16 | ||
72 : | |||
73 : | |||
74 : | ;=========================================================================== | ||
75 : | ; yuv constants | ||
76 : | ;=========================================================================== | ||
77 : | |||
78 : | %define Y_R 0.257 | ||
79 : | %define Y_G 0.504 | ||
80 : | %define Y_B 0.098 | ||
81 : | %define Y_ADD 16 | ||
82 : | |||
83 : | %define U_R 0.148 | ||
84 : | %define U_G 0.291 | ||
85 : | %define U_B 0.439 | ||
86 : | %define U_ADD 128 | ||
87 : | |||
88 : | %define V_R 0.439 | ||
89 : | %define V_G 0.368 | ||
90 : | %define V_B 0.071 | ||
91 : | %define V_ADD 128 | ||
92 : | |||
93 : | |||
94 : | ;=========================================================================== | ||
95 : | ; multiplication matrices | ||
96 : | ;=========================================================================== | ||
97 : | |||
98 : | ; %define SCALEBITS 8 | ||
99 : | |||
100 : | y_mul dw 25 ; FIX(Y_B) | ||
101 : | dw 129 ; FIX(Y_G) | ||
102 : | dw 66 ; FIX(Y_R) | ||
103 : | dw 0 | ||
104 : | |||
105 : | u_mul dw 112 ; FIX(U_B) | ||
106 : | dw -74 ; FIX(U_G) | ||
107 : | dw -38 ; FIX(U_R) | ||
108 : | dw 0 | ||
109 : | |||
110 : | v_mul dw -18 ; FIX(V_B) | ||
111 : | dw -94 ; FIX(V_G) | ||
112 : | dw 112 ; FIX(V_R) | ||
113 : | dw 0 | ||
114 : | |||
115 : | |||
116 : | |||
117 : | section .text | ||
118 : | |||
119 : | ;=========================================================================== | ||
120 : | ; | ||
121 : | ; void rgb24_to_yv12_mmx(uint8_t * const y_out, | ||
122 : | ; uint8_t * const u_out, | ||
123 : | ; uint8_t * const v_out, | ||
124 : | ; const uint8_t * const src, | ||
125 : | ; const uint32_t width, | ||
126 : | ; const uint32_t height, | ||
127 : | ; const uint32_t stride) | ||
128 : | ; | ||
129 : | ; always flips | ||
130 : | ; | ||
131 : | ;=========================================================================== | ||
132 : | |||
; rgb24_to_yv12_mmx
;
; Converts a bottom-up 24-bit BGR image to planar YV12, processing a
; 2x2 pixel quad per inner-loop iteration (4 Y samples, 1 U, 1 V from
; the averaged quad). The image is always vertically flipped: reading
; starts at the second-to-last source row and walks upward while the
; Y/U/V planes are written top-down.
;
; cdecl args (relative to esp after the 5 register pushes, base = 20):
;   +4  y_out, +8 u_out, +12 v_out, +16 src,
;   +20 width, +24 height, +28 stride (Y-plane stride; chroma = stride/2)
; assumes width and height are even; no clamping of the 8-bit results.
align 16
cglobal rgb24_to_yv12_mmx
rgb24_to_yv12_mmx

		push ebx
		push ecx
		push esi
		push edi
		push ebp			; STACK BASE = 20

	; global constants

		mov eax, [esp + 20 + 28]	; stride
		mov ecx, [esp + 20 + 20]	; width
		mov ebx, eax
		sub ebx, ecx
		shr ebx, 1			; ebx = (stride-width) / 2
		push ebx			; [esp + 20] = uv_dif (chroma row advance past width/2)
						; STACK BASE = 24

		add eax, eax
		sub eax, ecx			; eax = 2*stride - width
		push eax			; [esp + 16] = y_dif (skips the 2nd Y row written per pass)
						; STACK BASE = 28

		mov ebx, ecx
		shr ebx, 1
		push ebx			; [esp + 12] = width/2 (inner-loop count)
						; STACK BASE = 32

		mov edx, ecx
		add ecx, edx
		add ecx, edx			; ecx = 3*width (use 4 for rgb32)
		push ecx			; [esp + 8] = width3 (source row pitch in bytes)
						; STACK BASE = 36

		mov edx, ecx
		add edx, ecx
		add edx, ecx			; edx = 3*width3
		push edx			; [esp + 4] = src_dif (net: rewind 2 source rows per pass)
						; STACK BASE = 40

		mov esi, [esp + 40 + 16]	; src
		mov ebp, [esp + 40 + 24]	; ebp = height
		mov eax, ebp
		sub eax, 2
		mul ecx				; eax = (height-2) * width3
		add esi, eax			; src += (height-2) * width3 (start at 2nd-to-last row)

		mov edi, [esp + 40 + 4]		; y_out
		mov ecx, [esp + 40 + 8]		; u_out
		mov edx, [esp + 40 + 12]	; v_out
		movq mm7, [y_mul]

		shr ebp, 1			; ebp = height / 2
		push ebp			; [esp+0] = tmp (row-pair counter)
						; STACK BASE = 44

.yloop
		mov ebp, [esp + 12]		; ebp = width / 2

.xloop
			; y_out: 4 luma samples of the 2x2 quad

		mov ebx, [esp + 8]		; ebx = width3

		pxor mm4, mm4
		pxor mm5, mm5
		movd mm0, [esi]			; src[0...]       (lower row, left pixel)
		movd mm2, [esi+ebx]		; src[width3...]  (upper row, left pixel)
		punpcklbw mm0, mm4		; [ |b |g |r ]
		punpcklbw mm2, mm5		; [ |b |g |r ]
		movq mm6, mm0			; = [ |b4|g4|r4]  accumulate quad sums for chroma
		paddw mm6, mm2			; +[ |b4|g4|r4]
		pmaddwd mm0, mm7		; *= Y_MUL
		pmaddwd mm2, mm7		; *= Y_MUL
		movq mm4, mm0			; [r]
		movq mm5, mm2			; [r]
		psrlq mm4, 32			; +[g]
		psrlq mm5, 32			; +[g]
		paddd mm0, mm4			; +[b]  -> low dword = 25b+129g+66r
		paddd mm2, mm5			; +[b]

		pxor mm4, mm4
		pxor mm5, mm5
		movd mm1, [esi+3]		; src[3...]       (lower row, right pixel)
		movd mm3, [esi+ebx+3]		; src[width3+3...] (upper row, right pixel)
		punpcklbw mm1, mm4		; [ |b |g |r ]
		punpcklbw mm3, mm5		; [ |b |g |r ]
		paddw mm6, mm1			; +[ |b4|g4|r4]
		paddw mm6, mm3			; +[ |b4|g4|r4]
		pmaddwd mm1, mm7		; *= Y_MUL
		pmaddwd mm3, mm7		; *= Y_MUL
		movq mm4, mm1			; [r]
		movq mm5, mm3			; [r]
		psrlq mm4, 32			; +[g]
		psrlq mm5, 32			; +[g]
		paddd mm1, mm4			; +[b]
		paddd mm3, mm5			; +[b]

		mov ebx, [esp + 44 + 28]	; stride

		; flip: lower source row (mm0/mm1) lands on the 2nd output row
		movd eax, mm0
		shr eax, 8			; >> SCALEBITS
		add eax, Y_ADD
		mov [edi + ebx], al

		movd eax, mm1
		shr eax, 8
		add eax, Y_ADD
		mov [edi + ebx + 1], al

		movd eax, mm2
		shr eax, 8
		add eax, Y_ADD
		mov [edi], al

		movd eax, mm3
		shr eax, 8
		add eax, Y_ADD
		mov [edi + 1], al

			; u_out, v_out: from the 4-pixel sums (hence >> 10 = SCALEBITS+2)

		movq mm0, mm6			; = [ |b4|g4|r4]
		pmaddwd mm6, [v_mul]		; *= V_MUL
		pmaddwd mm0, [u_mul]		; *= U_MUL
		movq mm1, mm0
		movq mm2, mm6
		psrlq mm1, 32
		psrlq mm2, 32
		paddd mm0, mm1			; low dword = full U sum
		paddd mm2, mm6			; low dword = full V sum

		movd eax, mm0
		shr eax, 10			; logical shift is safe: only al is kept
		add eax, U_ADD
		mov [ecx], al

		movd eax, mm2
		shr eax, 10
		add eax, V_ADD
		mov [edx], al

		add esi, 2 * 3			; (use 4 for rgb32)
		add edi, 2
		inc ecx
		inc edx

		dec ebp
		jnz near .xloop

		sub esi, [esp + 4]		; src -= src_dif
		add edi, [esp + 16]		; y_out += y_dif
		add ecx, [esp + 20]		; u_out += uv_dif
		add edx, [esp + 20]		; v_out += uv_dif

		dec dword [esp+0]
		jnz near .yloop

		emms

		add esp, 24			; pop the 6 locals pushed above
		pop ebp
		pop edi
		pop esi
		pop ecx
		pop ebx

		ret
303 : | |||
304 : | |||
305 : | |||
306 : | ;=========================================================================== | ||
307 : | ; | ||
; void rgb32_to_yv12_mmx(uint8_t * const y_out,
309 : | ; uint8_t * const u_out, | ||
310 : | ; uint8_t * const v_out, | ||
311 : | ; const uint8_t * const src, | ||
312 : | ; const uint32_t width, | ||
313 : | ; const uint32_t height, | ||
314 : | ; const uint32_t stride) | ||
315 : | ; | ||
316 : | ; always flips | ||
317 : | ; | ||
318 : | ;=========================================================================== | ||
319 : | |||
; rgb32_to_yv12_mmx
;
; Converts a bottom-up 32-bit BGRA image to planar YV12. Identical
; structure to rgb24_to_yv12_mmx, but loads two 4-byte pixels with a
; single movq and splits them with punpcklbw/punpckhbw; the alpha byte
; contributes nothing because the 4th word of each coefficient table is 0.
; The image is always vertically flipped.
;
; cdecl args (relative to esp after the 5 register pushes, base = 20):
;   +4  y_out, +8 u_out, +12 v_out, +16 src,
;   +20 width, +24 height, +28 stride (Y-plane stride; chroma = stride/2)
; assumes width and height are even; no clamping of the 8-bit results.
;
; Fix vs. original: removed a dead "mov edx, ecx" before "shl ecx, 2" -
; edx was unconditionally rewritten a few instructions later before any read.
align 16
cglobal rgb32_to_yv12_mmx
rgb32_to_yv12_mmx

		push ebx
		push ecx
		push esi
		push edi
		push ebp			; STACK BASE = 20

	; global constants

		mov eax, [esp + 20 + 28]	; stride
		mov ecx, [esp + 20 + 20]	; width
		mov ebx, eax
		sub ebx, ecx
		shr ebx, 1			; ebx = (stride-width) / 2
		push ebx			; [esp + 20] = uv_dif
						; STACK BASE = 24

		add eax, eax
		sub eax, ecx			; eax = 2*stride - width
		push eax			; [esp + 16] = y_dif
						; STACK BASE = 28

		mov ebx, ecx
		shr ebx, 1
		push ebx			; [esp + 12] = width/2
						; STACK BASE = 32

		shl ecx, 2			; ecx = 4*width
		push ecx			; [esp + 8] = width4 (source row pitch in bytes)
						; STACK BASE = 36

		mov edx, ecx
		add edx, ecx
		add edx, ecx			; edx = 3*width4
		push edx			; [esp + 4] = src_dif (net: rewind 2 source rows per pass)
						; STACK BASE = 40

		mov esi, [esp + 40 + 16]	; src
		mov ebp, [esp + 40 + 24]	; ebp = height
		mov eax, ebp
		sub eax, 2
		mul ecx				; eax = (height-2) * width4
		add esi, eax			; src += (height-2) * width4 (start at 2nd-to-last row)

		mov edi, [esp + 40 + 4]		; y_out
		mov ecx, [esp + 40 + 8]		; u_out
		mov edx, [esp + 40 + 12]	; v_out
		movq mm7, [y_mul]

		shr ebp, 1			; ebp = height / 2
		push ebp			; [esp+0] = tmp (row-pair counter)
						; STACK BASE = 44

.yloop
		mov ebp, [esp + 12]		; ebp = width / 2

.xloop
			; y_out: 4 luma samples of the 2x2 quad

		mov ebx, [esp + 8]		; ebx = width4

		pxor mm4, mm4
		movq mm0, [esi]			; src[4...       |0...      ] (lower row)
		movq mm2, [esi+ebx]		; src[width4+4...|width4... ] (upper row)
		movq mm1, mm0
		movq mm3, mm2
		punpcklbw mm0, mm4		; [ |b |g |r ]  left pixels
		punpcklbw mm2, mm4		; [ |b |g |r ]
		punpckhbw mm1, mm4		; [ |b |g |r ]  right pixels
		punpckhbw mm3, mm4		; [ |b |g |r ]

		movq mm6, mm0			; = [ |b4|g4|r4]  accumulate quad sums for chroma
		paddw mm6, mm2			; +[ |b4|g4|r4]
		pmaddwd mm0, mm7		; *= Y_MUL
		pmaddwd mm2, mm7		; *= Y_MUL
		movq mm4, mm0			; [r]
		movq mm5, mm2			; [r]
		psrlq mm4, 32			; +[g]
		psrlq mm5, 32			; +[g]
		paddd mm0, mm4			; +[b]  -> low dword = 25b+129g+66r
		paddd mm2, mm5			; +[b]

		paddw mm6, mm1			; +[ |b4|g4|r4]
		paddw mm6, mm3			; +[ |b4|g4|r4]
		pmaddwd mm1, mm7		; *= Y_MUL
		pmaddwd mm3, mm7		; *= Y_MUL
		movq mm4, mm1			; [r]
		movq mm5, mm3			; [r]
		psrlq mm4, 32			; +[g]
		psrlq mm5, 32			; +[g]
		paddd mm1, mm4			; +[b]
		paddd mm3, mm5			; +[b]

		mov ebx, [esp + 44 + 28]	; stride

		; flip: lower source row (mm0/mm1) lands on the 2nd output row
		movd eax, mm0
		shr eax, 8			; >> SCALEBITS
		add eax, Y_ADD
		mov [edi + ebx], al

		movd eax, mm1
		shr eax, 8
		add eax, Y_ADD
		mov [edi + ebx + 1], al

		movd eax, mm2
		shr eax, 8
		add eax, Y_ADD
		mov [edi], al

		movd eax, mm3
		shr eax, 8
		add eax, Y_ADD
		mov [edi + 1], al

			; u_out, v_out: from the 4-pixel sums (hence >> 10 = SCALEBITS+2)

		movq mm0, mm6			; = [ |b4|g4|r4]
		pmaddwd mm6, [v_mul]		; *= V_MUL
		pmaddwd mm0, [u_mul]		; *= U_MUL
		movq mm1, mm0
		movq mm2, mm6
		psrlq mm1, 32
		psrlq mm2, 32
		paddd mm0, mm1			; low dword = full U sum
		paddd mm2, mm6			; low dword = full V sum

		movd eax, mm0
		shr eax, 10			; logical shift is safe: only al is kept
		add eax, U_ADD
		mov [ecx], al

		movd eax, mm2
		shr eax, 10
		add eax, V_ADD
		mov [edx], al

		add esi, 2 * 4			; two 32-bit pixels
		add edi, 2
		inc ecx
		inc edx

		dec ebp
		jnz near .xloop

		sub esi, [esp + 4]		; src -= src_dif
		add edi, [esp + 16]		; y_out += y_dif
		add ecx, [esp + 20]		; u_out += uv_dif
		add edx, [esp + 20]		; v_out += uv_dif

		dec dword [esp+0]
		jnz near .yloop

		emms

		add esp, 24			; pop the 6 locals pushed above
		pop ebp
		pop edi
		pop esi
		pop ecx
		pop ebx

		ret
No admin address has been configured | ViewVC Help |
Powered by ViewVC 1.0.4 |