Parent Directory | Revision Log
Revision 434 - (view) (download)
1 : | chl | 434 | ;/***************************************************************************** |
2 : | Isibaar | 3 | ; * |
3 : | chl | 434 | ; * XVID MPEG-4 VIDEO CODEC |
4 : | ; * mmx yuv planar to yuyv/uyvy conversion | ||
5 : | Isibaar | 3 | ; * |
6 : | chl | 434 | ; * Copyright(C) 2002 Peter Ross <pross@xvid.org> |
7 : | Isibaar | 3 | ; * |
8 : | chl | 434 | ; * This program is an implementation of a part of one or more MPEG-4 |
9 : | ; * Video tools as specified in ISO/IEC 14496-2 standard. Those intending | ||
10 : | ; * to use this software module in hardware or software products are | ||
11 : | ; * advised that its use may infringe existing patents or copyrights, and | ||
12 : | ; * any such use would be at such party's own risk. The original | ||
13 : | ; * developer of this software module and his/her company, and subsequent | ||
14 : | ; * editors and their companies, will have no liability for use of this | ||
15 : | ; * software or modifications or derivatives thereof. | ||
16 : | Isibaar | 3 | ; * |
17 : | chl | 434 | ; * This program is free software; you can redistribute it and/or modify |
18 : | ; * it under the terms of the GNU General Public License as published by | ||
19 : | ; * the Free Software Foundation; either version 2 of the License, or | ||
20 : | ; * (at your option) any later version. | ||
21 : | Isibaar | 3 | ; * |
22 : | chl | 434 | ; * This program is distributed in the hope that it will be useful, |
23 : | ; * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
24 : | ; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
25 : | ; * GNU General Public License for more details. | ||
26 : | Isibaar | 3 | ; * |
27 : | chl | 434 | ; * You should have received a copy of the GNU General Public License |
28 : | ; * along with this program; if not, write to the Free Software | ||
29 : | ; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
30 : | Isibaar | 3 | ; * |
31 : | chl | 434 | ; ****************************************************************************/ |
32 : | Isibaar | 3 | |
33 : | bits 32 | ||
34 : | |||
35 : | |||
36 : | section .data | ||
37 : | |||
; cglobal SYMBOL — export SYMBOL with the platform's C name mangling.
; When PREFIX is defined (a.out/Win32-style targets) the exported name
; gains a leading underscore and all local uses of SYMBOL are remapped
; to the underscored form; otherwise the symbol is exported verbatim.
%macro cglobal 1
	%ifndef PREFIX
		global %1
	%else
		global _%1
		%define %1 _%1
	%endif
%endmacro
46 : | |||
47 : | align 16 | ||
48 : | |||
49 : | |||
50 : | section .text | ||
51 : | |||
52 : | |||
;===========================================================================
;
; void yv12_to_yuyv_mmx(
;				uint8_t * dst,
;				int dst_stride,
;				uint8_t * y_src,
;				uint8_t * u_src,
;				uint8_t * v_src,
;				int y_stride,
;				int uv_stride,
;				int width,
;				int height);
;
; width must be multiple of 8
; ~10% faster than plain c
;
;===========================================================================
70 : | |||
align 16
cglobal yv12_to_yuyv_mmx
;---------------------------------------------------------------------------
; YV12 (4:2:0 planar) -> packed YUYV (4:2:2) conversion, MMX.
; cdecl: all arguments are read from the stack (offsets noted inline).
; Register roles inside the loops:
;   ebx = width/8 (inner trip count)   ebp = height/2 (row-pair count)
;   esi = y_src   ecx = u_src   edx = v_src   edi = dst
; NOTE(review): assumes width is a multiple of 8 and height is even.
; A negative height requests a vertically flipped conversion.
;---------------------------------------------------------------------------
yv12_to_yuyv_mmx

		push ebx
		push ecx
		push esi
		push edi
		push ebp			; STACK BASE = 20 (5 saved regs; args at [esp+20+n])

		; global constants

		mov ebx, [esp + 20 + 32]	; width
		mov eax, [esp + 20 + 8]		; dst_stride
		sub eax, ebx			;
		add eax, eax			; eax = 2*(dst_stride - width)
		push eax			; [esp + 0] = dst_dif (loops below read [esp + 0])
						; STACK BASE = 24

		shr ebx, 3			; ebx = width / 8
		mov edi, [esp + 24 + 4]		; dst


		; --------- flip -------------

		mov ebp, [esp + 24 + 36]	; height
		test ebp, ebp
		jl .flip			; negative height => convert bottom-up

		mov esi, [esp + 24 + 12]	; y_src
		mov ecx, [esp + 24 + 16]	; u_src
		mov edx, [esp + 24 + 20]	; v_src
		shr ebp, 1			; y = height / 2 (two luma rows per pass)
		jmp short .yloop


.flip
		neg ebp				; height = -height

		; point sources at the last row and negate the strides so the
		; normal loop below walks the image bottom-up
		mov eax, [esp + 24 + 24]	; y_stride
		lea edx, [ebp - 1]		; edx = height - 1
		mul edx
		mov esi, [esp + 24 + 12]	; y_src
		add esi, eax			; y_src += (height - 1) * y_stride

		shr ebp, 1			; y = height / 2
		mov eax, [esp + 24 + 28]	; uv_stride
		lea edx, [ebp - 1]		; edx = height/2 - 1
		mul edx

		mov ecx, [esp + 24 + 16]	; u_src
		mov edx, [esp + 24 + 20]	; v_src
		add ecx, eax			; u_src += (height/2 - 1) * uv_stride
		add edx, eax			; v_src += (height/2 - 1) * uv_stride

		neg dword [esp + 24 + 24]	; y_stride = -y_stride
		neg dword [esp + 24 + 28]	; uv_stride = -uv_stride

.yloop
		xor eax, eax			; x = 0;

		; first luma row of the pair: 8 luma + 4 chroma pairs per step
.xloop1
		movd mm0, [ecx+4*eax]		; [    |uuuu]
		movd mm1, [edx+4*eax]		; [    |vvvv]
		movq mm2, [esi+8*eax]		; [yyyy|yyyy]

		punpcklbw mm0, mm1		; [vuvu|vuvu]
		movq mm3, mm2
		punpcklbw mm2, mm0		; [vyuy|vyuy]  (memory order: y u y v)
		punpckhbw mm3, mm0		; [vyuy|vyuy]
		movq [edi], mm2
		movq [edi+8], mm3

		inc eax
		add edi, 16			; 8 pixels -> 16 output bytes

		cmp eax, ebx
		jb .xloop1

		add edi, [esp + 0]		; dst += dst_dif
		add esi, [esp + 24 + 24]	; y_src += y_stride

		xor eax, eax

		; second luma row: same chroma row is reused (4:2:0 -> 4:2:2 upsample)
.xloop2
		movd mm0, [ecx+4*eax]		; [    |uuuu]
		movd mm1, [edx+4*eax]		; [    |vvvv]
		movq mm2, [esi+8*eax]		; [yyyy|yyyy]

		punpcklbw mm0, mm1		; [vuvu|vuvu]
		movq mm3, mm2
		punpcklbw mm2, mm0		; [vyuy|vyuy]
		punpckhbw mm3, mm0		; [vyuy|vyuy]
		movq [edi], mm2
		movq [edi+8], mm3

		inc eax
		add edi, 16

		cmp eax, ebx
		jb .xloop2

		add edi, [esp + 0]		; dst += dst_dif
		add esi, [esp + 24 + 24]	; y_src += y_stride
		add ecx, [esp + 24 + 28]	; u_src += uv_stride
		add edx, [esp + 24 + 28]	; v_src += uv_stride

		dec ebp				; y--
		jnz near .yloop

		emms				; leave MMX state clean for FPU code

		add esp, 4			; drop dst_dif
		pop ebp
		pop edi
		pop esi
		pop ecx
		pop ebx

		ret
191 : | |||
192 : | |||
193 : | |||
194 : | |||
195 : | |||
;===========================================================================
;
; void yv12_to_uyvy_mmx(
;				uint8_t * dst,
;				int dst_stride,
;				uint8_t * y_src,
;				uint8_t * u_src,
;				uint8_t * v_src,
;				int y_stride,
;				int uv_stride,
;				int width,
;				int height);
;
; width must be multiple of 8
; ~10% faster than plain c
;
;===========================================================================
213 : | |||
align 16
cglobal yv12_to_uyvy_mmx
;---------------------------------------------------------------------------
; YV12 (4:2:0 planar) -> packed UYVY (4:2:2) conversion, MMX.
; cdecl: all arguments are read from the stack (offsets noted inline).
; Identical structure to yv12_to_yuyv_mmx; only the byte-interleave
; order in the inner loops differs (chroma byte first: U Y V Y).
; Register roles inside the loops:
;   ebx = width/8 (inner trip count)   ebp = height/2 (row-pair count)
;   esi = y_src   ecx = u_src   edx = v_src   edi = dst
; NOTE(review): assumes width is a multiple of 8 and height is even.
; A negative height requests a vertically flipped conversion.
;---------------------------------------------------------------------------
yv12_to_uyvy_mmx

		push ebx
		push ecx
		push esi
		push edi
		push ebp			; STACK BASE = 20 (5 saved regs; args at [esp+20+n])

		; global constants

		mov ebx, [esp + 20 + 32]	; width
		mov eax, [esp + 20 + 8]		; dst_stride
		sub eax, ebx			;
		add eax, eax			; eax = 2*(dst_stride - width)
		push eax			; [esp + 0] = dst_dif (loops below read [esp + 0])
						; STACK BASE = 24

		shr ebx, 3			; ebx = width / 8
		mov edi, [esp + 24 + 4]		; dst


		; --------- flip -------------

		mov ebp, [esp + 24 + 36]	; height
		test ebp, ebp
		jl .flip			; negative height => convert bottom-up

		mov esi, [esp + 24 + 12]	; y_src
		mov ecx, [esp + 24 + 16]	; u_src
		mov edx, [esp + 24 + 20]	; v_src
		shr ebp, 1			; y = height / 2 (two luma rows per pass)
		jmp short .yloop


.flip
		neg ebp				; height = -height

		; point sources at the last row and negate the strides so the
		; normal loop below walks the image bottom-up
		mov eax, [esp + 24 + 24]	; y_stride
		lea edx, [ebp - 1]		; edx = height - 1
		mul edx
		mov esi, [esp + 24 + 12]	; y_src
		add esi, eax			; y_src += (height - 1) * y_stride

		shr ebp, 1			; y = height / 2
		mov eax, [esp + 24 + 28]	; uv_stride
		lea edx, [ebp - 1]		; edx = height/2 - 1
		mul edx

		mov ecx, [esp + 24 + 16]	; u_src
		mov edx, [esp + 24 + 20]	; v_src
		add ecx, eax			; u_src += (height/2 - 1) * uv_stride
		add edx, eax			; v_src += (height/2 - 1) * uv_stride

		neg dword [esp + 24 + 24]	; y_stride = -y_stride
		neg dword [esp + 24 + 28]	; uv_stride = -uv_stride

.yloop
		xor eax, eax			; x = 0;

		; first luma row of the pair: 8 luma + 4 chroma pairs per step
.xloop1
		movd mm0, [ecx+4*eax]		; [    |uuuu]
		movd mm1, [edx+4*eax]		; [    |vvvv]
		movq mm2, [esi+8*eax]		; [yyyy|yyyy]

		punpcklbw mm0, mm1		; [vuvu|vuvu]
		movq mm1, mm0
		punpcklbw mm0, mm2		; [yvyu|yvyu]  (memory order: u y v y)
		punpckhbw mm1, mm2		; [yvyu|yvyu]
		movq [edi], mm0
		movq [edi+8], mm1

		inc eax
		add edi, 16			; 8 pixels -> 16 output bytes

		cmp eax, ebx
		jb .xloop1

		add edi, [esp + 0]		; dst += dst_dif
		add esi, [esp + 24 + 24]	; y_src += y_stride

		xor eax, eax

		; second luma row: same chroma row is reused (4:2:0 -> 4:2:2 upsample)
.xloop2
		movd mm0, [ecx+4*eax]		; [    |uuuu]
		movd mm1, [edx+4*eax]		; [    |vvvv]
		movq mm2, [esi+8*eax]		; [yyyy|yyyy]

		punpcklbw mm0, mm1		; [vuvu|vuvu]
		movq mm1, mm0
		punpcklbw mm0, mm2		; [yvyu|yvyu]
		punpckhbw mm1, mm2		; [yvyu|yvyu]

		movq [edi], mm0
		movq [edi+8], mm1

		inc eax
		add edi, 16

		cmp eax, ebx
		jb .xloop2

		add edi, [esp + 0]		; dst += dst_dif
		add esi, [esp + 24 + 24]	; y_src += y_stride
		add ecx, [esp + 24 + 28]	; u_src += uv_stride
		add edx, [esp + 24 + 28]	; v_src += uv_stride

		dec ebp				; y--
		jnz near .yloop

		emms				; leave MMX state clean for FPU code

		add esp, 4			; drop dst_dif
		pop ebp
		pop edi
		pop esi
		pop ecx
		pop ebx

		ret
No admin address has been configured | ViewVC Help |
Powered by ViewVC 1.0.4 |