; * XVID MPEG-4 VIDEO CODEC
; * - RGB colorspace conversions -
; *
; * Copyright(C) 2002-2008 Michael Militzer <michael@xvid.org>
; *              2002-2003 Peter Ross <pross@xvid.org>
; *
; * This program is free software ; you can redistribute it and/or modify
; * it under the terms of the GNU General Public License as published by
; * the Free Software Foundation ; either version 2 of the License, or
; * (at your option) any later version.
; *
; * This program is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY ; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; * GNU General Public License for more details.
; *
; * You should have received a copy of the GNU General Public License
; * along with this program ; if not, write to the Free Software
; * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307 USA
; *
; ****************************************************************************/

%include "nasm.inc"

;=============================================================================
; Some constants
;=============================================================================

%define Y_ADD 16
%define U_ADD 128
%define V_ADD 128

; Scaling used during conversion
%define SCALEBITS_OUT 6
%define SCALEBITS_IN 13

%define FIX_ROUND (1<<(SCALEBITS_IN-1))
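
; SCALEBITS_IN is the fixed-point precision of the RGB->YV12 tables below:
; each real coefficient c is stored as FIX(c) = round(c * (1<<SCALEBITS_IN)),
; and FIX_ROUND is the matching round-to-nearest term. As a scalar sketch of
; what one luma sample below computes (reference pseudo-C, not built code):
;
;   y = ((803*b + 4129*g + 2105*r + FIX_ROUND) >> SCALEBITS_IN) + Y_ADD;
;
; SCALEBITS_OUT is the smaller fraction width used on the YV12->RGB side,
; where products must stay within signed 16-bit range for pmullw/psraw.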

;=============================================================================
; Read only data
;=============================================================================

DATA

ALIGN SECTION_ALIGN

;-----------------------------------------------------------------------------
; BGR->YV12 multiplication matrices
;-----------------------------------------------------------------------------
; FIX(Y_B) FIX(Y_G) FIX(Y_R) Ignored
bgr_y_mul: dw 803, 4129, 2105, 0
bgr_u_mul: dw 3596, -2384, -1212, 0
bgr_v_mul: dw -582, -3015, 3596, 0
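
; These are the BT.601 "studio swing" weights scaled by 1<<SCALEBITS_IN = 8192,
; e.g. 2105/8192 ~ 0.257, 4129/8192 ~ 0.504, 803/8192 ~ 0.098:
;
;   Y = Y_ADD + 0.257*R + 0.504*G + 0.098*B
;   U = U_ADD - 0.148*R - 0.291*G + 0.439*B
;   V = V_ADD + 0.439*R - 0.368*G - 0.071*B
;
; The word order follows the in-memory byte order of a BGR pixel, so one
; pmaddwd yields b*cB + g*cG in one dword and r*cR in the other; the code
; below then folds the two dwords together.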

;-----------------------------------------------------------------------------
; RGB->YV12 multiplication matrices
;-----------------------------------------------------------------------------
; FIX(Y_R) FIX(Y_G) FIX(Y_B) Ignored
rgb_y_mul: dw 2105, 4129, 803, 0
rgb_u_mul: dw -1212, -2384, 3596, 0
rgb_v_mul: dw 3596, -3015, -582, 0
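
; Identical weights with the R and B columns exchanged, matching the byte
; order of RGB-packed input.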

;-----------------------------------------------------------------------------
; YV12->RGB data
;-----------------------------------------------------------------------------

; [...]

;------------------------------------------------------------------------------
; BGR_TO_YV12( BYTES )
;
; BYTES 3=bgr(24-bit), 4=bgra(32-bit)
;
; bytes=3/4, pixels = 2, vpixels=2
;------------------------------------------------------------------------------

%macro BGR_TO_YV12_INIT 2
  movq mm7, [bgr_y_mul]
%endmacro

%macro BGR_TO_YV12 2
  ; y_out

  pxor mm4, mm4
  pxor mm5, mm5
  movd mm0, [x_ptr]                 ; x_ptr[0...]
  movd mm2, [x_ptr+x_stride]        ; x_ptr[x_stride...]
  punpcklbw mm0, mm4                ; [ |b |g |r ]
  punpcklbw mm2, mm5                ; [ |b |g |r ]
  movq mm6, mm0                     ; = [ |b4|g4|r4]
  paddw mm6, mm2                    ; +[ |b4|g4|r4]
  pmaddwd mm0, mm7                  ; *= Y_MUL
  pmaddwd mm2, mm7                  ; *= Y_MUL
  movq mm4, mm0                     ; [r]
  movq mm5, mm2                     ; [r]
  psrlq mm4, 32                     ; +[g]
  psrlq mm5, 32                     ; +[g]
  paddd mm0, mm4                    ; +[b]
  paddd mm2, mm5                    ; +[b]

  pxor mm4, mm4
  pxor mm5, mm5
%if %1 == 3                         ; BGR (24-bit)
  movd mm1, [x_ptr+2]
  movd mm3, [x_ptr+x_stride+2]
  psrlq mm1, 8                      ; src[%1...]
  psrlq mm3, 8                      ; src[x_stride+%1...]
%else
  movd mm1, [x_ptr+%1]              ; src[%1...]
  movd mm3, [x_ptr+x_stride+%1]     ; src[x_stride+%1...]
%endif
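  ; (24-bit case: the second pixel starts at byte offset 3, but the load is
  ;  taken at offset 2 and shifted right by 8 so the 4-byte movd stays
  ;  inside the six bytes of the two pixels instead of touching the byte
  ;  after them.)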
  punpcklbw mm1, mm4                ; [ |b |g |r ]
  punpcklbw mm3, mm5                ; [ |b |g |r ]
  paddw mm6, mm1                    ; +[ |b4|g4|r4]
  paddw mm6, mm3                    ; +[ |b4|g4|r4]
  pmaddwd mm1, mm7                  ; *= Y_MUL
  pmaddwd mm3, mm7                  ; *= Y_MUL
  movq mm4, mm1                     ; [r]
  movq mm5, mm3                     ; [r]
  psrlq mm4, 32                     ; +[g]
  psrlq mm5, 32                     ; +[g]
  paddd mm1, mm4                    ; +[b]
  paddd mm3, mm5                    ; +[b]

  push x_stride

  movd x_stride_d, mm0
  add x_stride, FIX_ROUND
  shr x_stride, SCALEBITS_IN
  add x_stride, Y_ADD
  mov [y_ptr], dl                   ; y_ptr[0]

  movd x_stride_d, mm1
  add x_stride, FIX_ROUND
  shr x_stride, SCALEBITS_IN
  add x_stride, Y_ADD
  mov [y_ptr + 1], dl               ; y_ptr[1]

  movd x_stride_d, mm2
  add x_stride, FIX_ROUND
  shr x_stride, SCALEBITS_IN
  add x_stride, Y_ADD
  mov [y_ptr + y_stride + 0], dl    ; y_ptr[y_stride + 0]

  movd x_stride_d, mm3
  add x_stride, FIX_ROUND
  shr x_stride, SCALEBITS_IN
  add x_stride, Y_ADD
  mov [y_ptr + y_stride + 1], dl    ; y_ptr[y_stride + 1]

  ; u_ptr, v_ptr
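  ; mm6 holds the component-wise sum of all four pixels of the 2x2 block;
  ; the divide-by-4 of the average is folded into the larger shift below
  ; (SCALEBITS_IN+2), with 4*FIX_ROUND as the matching rounding term, so
  ; one subsampled U and one V sample are emitted per block.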
  movq mm0, mm6                     ; = [ |b4|g4|r4]
  pmaddwd mm6, [bgr_v_mul]          ; *= V_MUL
  pmaddwd mm0, [bgr_u_mul]          ; *= U_MUL
  movq mm1, mm0
  movq mm2, mm6
  psrlq mm1, 32
  psrlq mm2, 32
  paddd mm0, mm1
  paddd mm2, mm6

  movd x_stride_d, mm0
  add x_stride, 4*FIX_ROUND
  shr x_stride, (SCALEBITS_IN+2)
  add x_stride, U_ADD
  mov [u_ptr], dl

  movd x_stride_d, mm2
  add x_stride, 4*FIX_ROUND
  shr x_stride, (SCALEBITS_IN+2)
  add x_stride, V_ADD
  mov [v_ptr], dl

  pop x_stride
%endmacro
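
; x_stride is saved around the store sequence above and reused as an
; integer scratch register; x_stride_d and dl are the 32-bit and low-byte
; views of that register (the dl stores assume the colorspace_mmx.inc
; mapping keeps x_stride in edx/rdx, as the register names here imply).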

;------------------------------------------------------------------------------
; RGB_TO_YV12( BYTES )
;
; BYTES 3=rgb(24-bit), 4=rgba(32-bit)
;
; bytes=3/4, pixels = 2, vpixels=2
;------------------------------------------------------------------------------

%macro RGB_TO_YV12_INIT 2
  movq mm7, [rgb_y_mul]
%endmacro

%macro RGB_TO_YV12 2
  ; y_out
  pxor mm4, mm4
  pxor mm5, mm5
  movd mm0, [x_ptr]                 ; x_ptr[0...]
  movd mm2, [x_ptr+x_stride]        ; x_ptr[x_stride...]
  punpcklbw mm0, mm4                ; [ |b |g |r ]
  punpcklbw mm2, mm5                ; [ |b |g |r ]
  movq mm6, mm0                     ; = [ |b4|g4|r4]
  paddw mm6, mm2                    ; +[ |b4|g4|r4]
  pmaddwd mm0, mm7                  ; *= Y_MUL
  pmaddwd mm2, mm7                  ; *= Y_MUL
  movq mm4, mm0                     ; [r]
  movq mm5, mm2                     ; [r]
  psrlq mm4, 32                     ; +[g]
  psrlq mm5, 32                     ; +[g]
  paddd mm0, mm4                    ; +[b]
  paddd mm2, mm5                    ; +[b]

  pxor mm4, mm4
  pxor mm5, mm5
%if %1 == 3                         ; RGB (24-bit)
  movd mm1, [x_ptr+2]
  movd mm3, [x_ptr+x_stride+2]
  psrlq mm1, 8                      ; src[%1...]
  psrlq mm3, 8                      ; src[x_stride+%1...]
%else
  movd mm1, [x_ptr+%1]              ; src[%1...]
  movd mm3, [x_ptr+x_stride+%1]     ; src[x_stride+%1...]
%endif
  punpcklbw mm1, mm4                ; [ |b |g |r ]
  punpcklbw mm3, mm5                ; [ |b |g |r ]
  paddw mm6, mm1                    ; +[ |b4|g4|r4]
  paddw mm6, mm3                    ; +[ |b4|g4|r4]
  pmaddwd mm1, mm7                  ; *= Y_MUL
  pmaddwd mm3, mm7                  ; *= Y_MUL
  movq mm4, mm1                     ; [r]
  movq mm5, mm3                     ; [r]
  psrlq mm4, 32                     ; +[g]
  psrlq mm5, 32                     ; +[g]
  paddd mm1, mm4                    ; +[b]
  paddd mm3, mm5                    ; +[b]

  push x_stride

  movd x_stride_d, mm0
  add x_stride, FIX_ROUND
  shr x_stride, SCALEBITS_IN
  add x_stride, Y_ADD
  mov [y_ptr], dl                   ; y_ptr[0]

  movd x_stride_d, mm1
  add x_stride, FIX_ROUND
  shr x_stride, SCALEBITS_IN
  add x_stride, Y_ADD
  mov [y_ptr + 1], dl               ; y_ptr[1]

  movd x_stride_d, mm2
  add x_stride, FIX_ROUND
  shr x_stride, SCALEBITS_IN
  add x_stride, Y_ADD
  mov [y_ptr + y_stride + 0], dl    ; y_ptr[y_stride + 0]

  movd x_stride_d, mm3
  add x_stride, FIX_ROUND
  shr x_stride, SCALEBITS_IN
  add x_stride, Y_ADD
  mov [y_ptr + y_stride + 1], dl    ; y_ptr[y_stride + 1]

  ; u_ptr, v_ptr
  movq mm0, mm6                     ; = [ |b4|g4|r4]
  pmaddwd mm6, [rgb_v_mul]          ; *= V_MUL
  pmaddwd mm0, [rgb_u_mul]          ; *= U_MUL
  movq mm1, mm0
  movq mm2, mm6
  psrlq mm1, 32
  psrlq mm2, 32
  paddd mm0, mm1
  paddd mm2, mm6

  movd x_stride_d, mm0
  add x_stride, 4*FIX_ROUND
  shr x_stride, (SCALEBITS_IN+2)
  add x_stride, U_ADD
  mov [u_ptr], dl

  movd x_stride_d, mm2
  add x_stride, 4*FIX_ROUND
  shr x_stride, (SCALEBITS_IN+2)
  add x_stride, V_ADD
  mov [v_ptr], dl

  pop x_stride
%endmacro
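
; Apart from reading the rgb_* tables instead of the bgr_* ones, this macro
; is identical to BGR_TO_YV12 above; the channel swap lives entirely in the
; coefficient order of the tables.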

;------------------------------------------------------------------------------
; [...]
%endmacro   ; closes what is presumably YV12_TO_BGR_INIT, elided above

%macro YV12_TO_BGR 2
%define TEMP_Y1 _ESP
%define TEMP_Y2 _ESP + 8
%define TEMP_G1 _ESP + 16
%define TEMP_G2 _ESP + 24
%define TEMP_B1 _ESP + 32
%define TEMP_B2 _ESP + 40
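
; Six quadword scratch slots (48 bytes, matching the stack-space argument
; given to MAKE_COLORSPACE below) spill the intermediate luma, green and
; blue rows; the red rows stay in registers (the TEMP_R1/TEMP_R2 notes in
; the comments below).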

  movd mm2, [u_ptr]                 ; u_ptr[0]
  movd mm3, [v_ptr]                 ; v_ptr[0]
  punpcklbw mm2, mm7                ; u3u2u1u0 -> mm2
  punpcklbw mm3, mm7                ; v3v2v1v0 -> mm3
  psubsw mm2, [U_SUB]               ; U - 128
; [...]
  paddsw mm2, mm3
  paddsw mm6, mm0
  pmullw mm5, [VR_MUL]              ; R_ADD -> mm5
  movq mm0, [y_ptr]                 ; y7y6y5y4y3y2y1y0 -> mm0
  movq mm1, mm0
  punpckhbw mm1, mm7                ; y7y6y5y4 -> mm1
  punpcklbw mm0, mm7                ; y3y2y1y0 -> mm0
; [...]
  movq [TEMP_Y1], mm0               ; y3y2y1y0 -> TEMP_Y1
  psubsw mm1, mm2                   ; g7g6g5g4 -> mm1
  psubsw mm0, mm6                   ; g3g2g1g0 -> mm0
  psraw mm1, SCALEBITS_OUT
  psraw mm0, SCALEBITS_OUT
  packuswb mm0, mm1                 ; g7g6g5g4g3g2g1g0 -> mm0
  movq [TEMP_G1], mm0
  movq mm0, [y_ptr+y_stride]        ; y7y6y5y4y3y2y1y0 -> mm0
  movq mm1, mm0
  punpckhbw mm1, mm7                ; y7y6y5y4 -> mm1
  punpcklbw mm0, mm7                ; y3y2y1y0 -> mm0
; [...]
  psubsw mm1, mm2                   ; g7g6g5g4 -> mm1
  movq mm2, mm0
  psubsw mm0, mm6                   ; g3g2g1g0 -> mm0
  psraw mm1, SCALEBITS_OUT
  psraw mm0, SCALEBITS_OUT
  packuswb mm0, mm1                 ; g7g6g5g4g3g2g1g0 -> mm0
  movq [TEMP_G2], mm0
  movq mm0, mm4
; [...]
  paddsw mm3, mm4                   ; b7b6b5b4 -> mm3
  movq mm7, mm2                     ; y3y2y1y0 -> mm7
  paddsw mm2, mm0                   ; b3b2b1b0 -> mm2
  psraw mm3, SCALEBITS_OUT
  psraw mm2, SCALEBITS_OUT
  packuswb mm2, mm3                 ; b7b6b5b4b3b2b1b0 -> mm2
  movq [TEMP_B2], mm2
  movq mm3, [TEMP_Y2]
; [...]
  paddsw mm3, mm4                   ; b7b6b5b4 -> mm3
  movq mm4, mm2                     ; TEMP_Y1 -> mm4
  paddsw mm2, mm0                   ; b3b2b1b0 -> mm2
  psraw mm3, SCALEBITS_OUT
  psraw mm2, SCALEBITS_OUT
  packuswb mm2, mm3                 ; b7b6b5b4b3b2b1b0 -> mm2
  movq [TEMP_B1], mm2
  movq mm0, mm5
  punpckhwd mm5, mm5                ; v3v3v2v2 -> mm5
  punpcklwd mm0, mm0                ; v1v1v0v0 -> mm0
  paddsw mm1, mm5                   ; r7r6r5r4 -> mm1
  paddsw mm7, mm0                   ; r3r2r1r0 -> mm7
  psraw mm1, SCALEBITS_OUT
  psraw mm7, SCALEBITS_OUT
  packuswb mm7, mm1                 ; r7r6r5r4r3r2r1r0 -> mm7 (TEMP_R2)
  paddsw mm6, mm5                   ; r7r6r5r4 -> mm6
  paddsw mm4, mm0                   ; r3r2r1r0 -> mm4
  psraw mm6, SCALEBITS_OUT
  psraw mm4, SCALEBITS_OUT
  packuswb mm4, mm6                 ; r7r6r5r4r3r2r1r0 -> mm4 (TEMP_R1)
  movq mm0, [TEMP_B1]
  movq mm1, [TEMP_G1]
; [...]
  punpcklbw mm0, mm3                ; 0r5g5b50r4g4b4 -> mm0
  punpckhbw mm5, mm3                ; 0r7g7b70r6g6b6 -> mm5
%if %1 == 3                         ; BGR (24-bit)
  movd [x_ptr], mm2
  psrlq mm2, 32
  movd [x_ptr + 3], mm2
  movd [x_ptr + 6], mm4
  psrlq mm4, 32
  movd [x_ptr + 9], mm4
  movd [x_ptr + 12], mm0
  psrlq mm0, 32
  movd [x_ptr + 15], mm0
  movq mm2, mm5
  psrlq mm0, 8                      ; 000000r5g5 -> mm0
  psllq mm2, 32                     ; 0r6g6b60000 -> mm2
; [...]
  por mm0, mm2                      ; 000r6g6b6r5g5 -> mm0
  psllq mm5, 40                     ; r7g7b700000 -> mm5
  por mm5, mm0                      ; r7g7b7r6g6b6r5g5 -> mm5
  movq [x_ptr + 16], mm5
  movq mm0, [TEMP_B2]
  movq mm1, [TEMP_G2]
  movq mm2, mm0
; [...]
  movq mm5, mm0
  punpcklbw mm0, mm3                ; 0r5g5b50r4g4b4 -> mm0
  punpckhbw mm5, mm3                ; 0r7g7b70r6g6b6 -> mm5
  movd [x_ptr+x_stride], mm2
  psrlq mm2, 32
  movd [x_ptr+x_stride + 3], mm2
  movd [x_ptr+x_stride + 6], mm4
  psrlq mm4, 32
  movd [x_ptr+x_stride + 9], mm4
  movd [x_ptr+x_stride + 12], mm0
  psrlq mm0, 32
  movd [x_ptr+x_stride + 15], mm0
  movq mm2, mm5
  psrlq mm0, 8                      ; 000000r5g5 -> mm0
  psllq mm2, 32                     ; 0r6g6b60000 -> mm2
; [...]
  por mm0, mm2                      ; 000r6g6b6r5g5 -> mm0
  psllq mm5, 40                     ; r7g7b700000 -> mm5
  por mm5, mm0                      ; r7g7b7r6g6b6r5g5 -> mm5
  movq [x_ptr + x_stride + 16], mm5

%else                               ; BGRA (32-bit)
  movq [x_ptr], mm2
  movq [x_ptr + 8], mm4
  movq [x_ptr + 16], mm0
  movq [x_ptr + 24], mm5
  movq mm0, [TEMP_B2]
  movq mm1, [TEMP_G2]
  movq mm2, mm0
; [...]
  movq mm5, mm0
  punpcklbw mm0, mm3                ; 0r5g5b50r4g4b4 -> mm0
  punpckhbw mm5, mm3                ; 0r7g7b70r6g6b6 -> mm5
  movq [x_ptr + x_stride], mm2
  movq [x_ptr + x_stride + 8], mm4
  movq [x_ptr + x_stride + 16], mm0
  movq [x_ptr + x_stride + 24], mm5
%endif
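  ; In the 24-bit branch each movd writes four bytes, so the stores at
  ; offsets 0,3,6,9,12,15 intentionally overlap by one byte, and the tail
  ; of the row (the rest of pixel 5 plus pixels 6 and 7) is packed into a
  ; single movq at offset 16 so nothing is written past the 24 bytes of
  ; the eight output pixels. The 32-bit branch simply stores quadwords.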

%undef TEMP_Y1
%undef TEMP_Y2
%undef TEMP_G1
%undef TEMP_G2
%undef TEMP_B1
%undef TEMP_B2
%endmacro

;=============================================================================
; Code
;=============================================================================

TEXT

%include "colorspace_mmx.inc"
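
; MAKE_COLORSPACE (defined in colorspace_mmx.inc) emits one exported
; function per invocation below. Judging from these call sites and the
; "bytes=3/4, pixels = 2, vpixels=2" note above, the arguments are: name,
; stack scratch bytes, bytes per pixel, pixels and rows handled per
; iteration, the worker macro, and the worker's parameters.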

; input
MAKE_COLORSPACE bgr_to_yv12_mmx,0, 3,2,2, BGR_TO_YV12, 3, -1
MAKE_COLORSPACE bgra_to_yv12_mmx,0, 4,2,2, BGR_TO_YV12, 4, -1
MAKE_COLORSPACE rgb_to_yv12_mmx,0, 3,2,2, RGB_TO_YV12, 3, -1
MAKE_COLORSPACE rgba_to_yv12_mmx,0, 4,2,2, RGB_TO_YV12, 4, -1

; output
MAKE_COLORSPACE yv12_to_bgr_mmx,48, 3,8,2, YV12_TO_BGR, 3, -1
MAKE_COLORSPACE yv12_to_bgra_mmx,48, 4,8,2, YV12_TO_BGR, 4, -1

NON_EXEC_STACK
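
; NON_EXEC_STACK (from nasm.inc) presumably emits the note section that
; marks the object's stack as non-executable on ELF targets, and expands
; to nothing elsewhere.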