@@ line 20 @@
; * along with this program ; if not, write to the Free Software
; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
-; * $Id: quantize_mpeg_xmm.asm,v 1.8 2008-11-11 20:46:24 Isibaar Exp $
+; * $Id: quantize_mpeg_xmm.asm,v 1.9 2008-11-26 01:04:34 Isibaar Exp $
; *
; ***************************************************************************/

@@ line 29 @@

%define SATURATE

-BITS 32
+%include "nasm.inc"
-%macro cglobal 1
-%ifdef PREFIX
-%ifdef MARK_FUNCS
-global _%1:function %1.endfunc-%1
-%define %1 _%1:function %1.endfunc-%1
-%define ENDFUNC .endfunc
-%else
-global _%1
-%define %1 _%1
-%define ENDFUNC
-%endif
-%else
-%ifdef MARK_FUNCS
-global %1:function %1.endfunc-%1
-%define ENDFUNC .endfunc
-%else
-global %1
-%define ENDFUNC
-%endif
-%endif
-%endmacro
-
-%macro cextern 1
-%ifdef PREFIX
-extern _%1
-%define %1 _%1
-%else
-extern %1
-%endif
-%endmacro

;=============================================================================
; Local data
;=============================================================================

-%ifdef FORMAT_COFF
+DATA
-SECTION .rodata
-%else
-SECTION .rodata align=16
-%endif

-ALIGN 8
+ALIGN SECTION_ALIGN
mmzero:
dd 0,0
mmx_one:
@@ line 47 @@
; divide by 2Q table
;-----------------------------------------------------------------------------

-ALIGN 16
+ALIGN SECTION_ALIGN
mmx_divs: ;i>2
%assign i 1
%rep 31
@@ line 55 @@
%assign i i+1
%endrep

-ALIGN 16
+ALIGN SECTION_ALIGN
mmx_div: ;quant>2
times 4 dw 65535 ; the div by 2 formula will overflow for the case
; quant=1 but we don't care much because quant=1
@@ line 71 @@
dw (1 << 16) / (%1) + 1
%endmacro
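; A rough worked example of the reciprocal entries built with
; "dw (1 << 16) / (%1) + 1" (illustrative only): to divide an unsigned
; 16-bit level by d without a slow DIV, the table stores r = (1<<16)/d + 1
; and pmulhuw keeps only the high 16 bits of level*r, which is level/d.
; E.g. for d = 6, r = 65536/6 + 1 = 10923, and a level of 300 gives
; (300 * 10923) >> 16 = 50 = 300/6.
;
;   pmulhuw mm5, mm6   ; mm6 = four copies of r  ->  mm5 = level/d per word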
+%ifndef ARCH_IS_X86_64
%define nop4 db 08Dh, 074h, 026h,0
-%define nop3 add esp, byte 0
-%define nop2 mov esp, esp
%define nop7 db 08dh, 02ch, 02dh,0,0,0,0
-%define nop6 add ebp, dword 0
+%else
+%define nop4
+%define nop7
+%endif
+%define nop3 add _ESP, byte 0
+%define nop2 mov _ESP, _ESP
+%define nop6 add _EBP, dword 0
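; Note (an assumption from the encodings): nop2/nop3/nop4/nop6/nop7 are
; padding idioms of 2, 3, 4, 6 and 7 bytes, e.g. "db 08Dh, 074h, 026h,0"
; encodes lea esi,[esi+0], used only to align the code that follows; on
; x86-64 the db forms are left empty and the arithmetic forms use the
; portable _ESP/_EBP register aliases.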
;-----------------------------------------------------------------------------
; quantd table
@@ line 89 @@
%define VM18P 3
%define VM18Q 4

-ALIGN 16
+ALIGN SECTION_ALIGN
quantd:
%assign i 1
%rep 31
@@ line 101 @@
; multiple by 2Q table
;-----------------------------------------------------------------------------

-ALIGN 16
+ALIGN SECTION_ALIGN
mmx_mul_quant:
%assign i 1
%rep 31
@@ line 113 @@
; saturation limits
;-----------------------------------------------------------------------------

-ALIGN 16
+ALIGN SECTION_ALIGN
mmx_32767_minus_2047:
times 4 dw (32767-2047)
mmx_32768_minus_2048:
@@ line 137 @@
; Code
;=============================================================================

-SECTION .text
+SECTION .rotext align=SECTION_ALIGN

cglobal quant_mpeg_inter_xmm
cglobal dequant_mpeg_intra_3dne
@@ line 152 @@
;
;-----------------------------------------------------------------------------

-ALIGN 16
+ALIGN SECTION_ALIGN
quant_mpeg_inter_xmm:
-mov eax, [esp + 8] ; data
+mov _EAX, prm2 ; data
-mov ecx, [esp + 12] ; quant
+mov TMP0, prm3 ; quant
-mov edx, [esp + 4] ; coeff
+mov TMP1, prm1 ; coeff
-push esi
+push _ESI
-push edi
+push _EDI
-push ebx
+push _EBX
nop
-mov edi, [esp + 12 + 16]
-mov esi, -14
-mov ebx, esp
-sub esp, byte 24
-lea ebx, [esp+8]
-and ebx, byte -8 ;ALIGN 8
+%ifdef ARCH_IS_X86_64
+mov _EDI, prm4
+%else
+mov _EDI, [_ESP + 12 + 16]
+%endif
+
+mov _ESI, -14
+mov _EBX, _ESP
+sub _ESP, byte 24
+lea _EBX, [_ESP+8]
+and _EBX, byte -8 ;ALIGN 8
pxor mm0, mm0
pxor mm3, mm3
-movq [byte ebx],mm0
+movq [byte _EBX],mm0
-db 0Fh, 7Fh, 44h, 23h, 8 ;movq [ebx+8],mm0
+movq [_EBX+8],mm0
-cmp ecx, byte 1
+%if 0
+cmp TMP0, byte 1
je near .q1loop
-cmp ecx, byte 19
+cmp TMP0, byte 19
jg near .lloop
nop
+%endif

-ALIGN 16
+ALIGN SECTION_ALIGN
.loop:
-movq mm1, [eax + 8*esi+112] ; mm0 = [1st]
+movq mm1, [_EAX + 8*_ESI+112] ; mm0 = [1st]
psubw mm0, mm1 ;-mm1
-movq mm4, [eax + 8*esi + 120] ;
+movq mm4, [_EAX + 8*_ESI + 120] ;
psubw mm3, mm4 ;-mm4
pmaxsw mm0, mm1 ;|src|
pmaxsw mm3, mm4
@@ line 197 @@
psraw mm4, 15
psllw mm0, 4 ; level << 4
psllw mm3, 4 ;
-paddw mm0, [edi + 640 + 8*esi+112]
+paddw mm0, [_EDI + 640 + 8*_ESI+112]
-paddw mm3, [edi + 640 + 8*esi+120]
+paddw mm3, [_EDI + 640 + 8*_ESI+120]
-movq mm5, [edi + 896 + 8*esi+112]
+movq mm5, [_EDI + 896 + 8*_ESI+112]
-movq mm7, [edi + 896 + 8*esi+120]
+movq mm7, [_EDI + 896 + 8*_ESI+120]
pmulhuw mm5, mm0
pmulhuw mm7, mm3
-mov esp, esp
+mov _ESP, _ESP
-movq mm2, [edi + 512 + 8*esi+112]
+movq mm2, [_EDI + 512 + 8*_ESI+112]
-movq mm6, [edi + 512 + 8*esi+120]
+movq mm6, [_EDI + 512 + 8*_ESI+120]
pmullw mm2, mm5
pmullw mm6, mm7
psubw mm0, mm2
psubw mm3, mm6
-movq mm2, [byte ebx]
+movq mm2, [byte _EBX]
-movq mm6, [mmx_divs + ecx * 8 - 8]
-pmulhuw mm0, [edi + 768 + 8*esi+112]
-pmulhuw mm3, [edi + 768 + 8*esi+120]
-paddw mm2, [ebx+8] ;sum
+%ifdef ARCH_IS_X86_64
+lea r9, [mmx_divs]
+movq mm6, [r9 + TMP0 * 8 - 8]
+%else
+movq mm6, [mmx_divs + TMP0 * 8 - 8]
+%endif
+pmulhuw mm0, [_EDI + 768 + 8*_ESI+112]
+pmulhuw mm3, [_EDI + 768 + 8*_ESI+120]
+paddw mm2, [_EBX+8] ;sum
paddw mm5, mm0
paddw mm7, mm3
pxor mm0, mm0
pxor mm3, mm3
pmulhuw mm5, mm6 ; mm0 = (mm0 / 2Q) >> 16
pmulhuw mm7, mm6 ; (level ) / quant (0<quant<32)
-add esi, byte 2
+add _ESI, byte 2
paddw mm2, mm5 ;sum += x1
-movq [ebx], mm7 ;store x2
+movq [_EBX], mm7 ;store x2
pxor mm5, mm1 ; mm0 *= sign(mm0)
pxor mm7, mm4 ;
psubw mm5, mm1 ; undisplace
psubw mm7, mm4 ;
-db 0Fh, 7Fh, 54h, 23h, 08 ;movq [ebx+8],mm2 ;store sum
+movq [_EBX+8],mm2 ;store sum
-movq [edx + 8*esi+112-16], mm5
+movq [TMP1 + 8*_ESI+112-16], mm5
-movq [edx + 8*esi +120-16], mm7
+movq [TMP1 + 8*_ESI +120-16], mm7
jng near .loop

.done:
; calculate data[0] // (int32_t)dcscalar)
-paddw mm2, [ebx]
+paddw mm2, [_EBX]
-mov ebx, [esp+24]
+mov _EBX, [_ESP+24]
-mov edi, [esp+4+24]
+mov _EDI, [_ESP+PTR_SIZE+24]
-mov esi, [esp+8+24]
+mov _ESI, [_ESP+2*PTR_SIZE+24]
-add esp, byte 12+24
+add _ESP, byte 3*PTR_SIZE+24
pmaddwd mm2, [mmx_one]
punpckldq mm0, mm2 ;get low dw to mm0:high
paddd mm0,mm2
@@ line 253 @@

ret

-ALIGN 16
+ALIGN SECTION_ALIGN
.q1loop:
-movq mm1, [eax + 8*esi+112] ; mm0 = [1st]
+movq mm1, [_EAX + 8*_ESI+112] ; mm0 = [1st]
psubw mm0, mm1 ;-mm1
-movq mm4, [eax + 8*esi+120]
+movq mm4, [_EAX + 8*_ESI+120]
psubw mm3, mm4 ;-mm4
pmaxsw mm0, mm1 ;|src|
pmaxsw mm3, mm4
@@ line 266 @@
psraw mm4, 15
psllw mm0, 4 ; level << 4
psllw mm3, 4
-paddw mm0, [edi + 640 + 8*esi+112] ;mm0 is to be divided
+paddw mm0, [_EDI + 640 + 8*_ESI+112] ;mm0 is to be divided
-paddw mm3, [edi + 640 + 8*esi+120] ; inter1 contains fix for division by 1
+paddw mm3, [_EDI + 640 + 8*_ESI+120] ; inter1 contains fix for division by 1
-movq mm5, [edi + 896 + 8*esi+112] ;with rounding down
+movq mm5, [_EDI + 896 + 8*_ESI+112] ;with rounding down
-movq mm7, [edi + 896 + 8*esi+120]
+movq mm7, [_EDI + 896 + 8*_ESI+120]
pmulhuw mm5, mm0
pmulhuw mm7, mm3 ;mm7: first approx of division
-mov esp, esp
+mov _ESP, _ESP
-movq mm2, [edi + 512 + 8*esi+112]
+movq mm2, [_EDI + 512 + 8*_ESI+112]
-movq mm6, [edi + 512 + 8*esi+120] ; divs for q<=16
+movq mm6, [_EDI + 512 + 8*_ESI+120] ; divs for q<=16
pmullw mm2, mm5 ;test value <= original
pmullw mm6, mm7
psubw mm0, mm2 ;mismatch
psubw mm3, mm6
-movq mm2, [byte ebx]
+movq mm2, [byte _EBX]
-pmulhuw mm0, [edi + 768 + 8*esi+112] ;correction
+pmulhuw mm0, [_EDI + 768 + 8*_ESI+112] ;correction
-pmulhuw mm3, [edi + 768 + 8*esi+120]
+pmulhuw mm3, [_EDI + 768 + 8*_ESI+120]
-paddw mm2, [ebx+8] ;sum
+paddw mm2, [_EBX+8] ;sum
paddw mm5, mm0 ;final result
paddw mm7, mm3
pxor mm0, mm0
pxor mm3, mm3
psrlw mm5, 1 ; (level ) /2 (quant = 1)
psrlw mm7, 1
-add esi, byte 2
+add _ESI, byte 2
paddw mm2, mm5 ;sum += x1
-movq [ebx], mm7 ;store x2
+movq [_EBX], mm7 ;store x2
pxor mm5, mm1 ; mm0 *= sign(mm0)
pxor mm7, mm4 ;
psubw mm5, mm1 ; undisplace
psubw mm7, mm4 ;
-movq [ebx+8], mm2 ;store sum
+movq [_EBX+8], mm2 ;store sum
-movq [edx + 8*esi+112-16], mm5
+movq [TMP1 + 8*_ESI+112-16], mm5
-movq [edx + 8*esi +120-16], mm7
+movq [TMP1 + 8*_ESI +120-16], mm7
jng near .q1loop
jmp near .done

-ALIGN 8
+ALIGN SECTION_ALIGN
.lloop:
-movq mm1, [eax + 8*esi+112] ; mm0 = [1st]
+movq mm1, [_EAX + 8*_ESI+112] ; mm0 = [1st]
psubw mm0,mm1 ;-mm1
-movq mm4, [eax + 8*esi+120]
+movq mm4, [_EAX + 8*_ESI+120]
psubw mm3,mm4 ;-mm4
pmaxsw mm0,mm1 ;|src|
pmaxsw mm3,mm4
@@ line 315 @@
psraw mm4,15
psllw mm0, 4 ; level << 4
psllw mm3, 4 ;
-paddw mm0, [edi + 640 + 8*esi+112] ;mm0 is to be divided inter1 contains fix for division by 1
+paddw mm0, [_EDI + 640 + 8*_ESI+112] ;mm0 is to be divided inter1 contains fix for division by 1
-paddw mm3, [edi + 640 + 8*esi+120]
+paddw mm3, [_EDI + 640 + 8*_ESI+120]
-movq mm5,[edi + 896 + 8*esi+112]
+movq mm5,[_EDI + 896 + 8*_ESI+112]
-movq mm7,[edi + 896 + 8*esi+120]
+movq mm7,[_EDI + 896 + 8*_ESI+120]
pmulhuw mm5,mm0
pmulhuw mm7,mm3 ;mm7: first approx of division
-mov esp,esp
+mov _ESP,_ESP
-movq mm2,[edi + 512 + 8*esi+112]
+movq mm2,[_EDI + 512 + 8*_ESI+112]
-movq mm6,[edi + 512 + 8*esi+120]
+movq mm6,[_EDI + 512 + 8*_ESI+120]
pmullw mm2,mm5 ;test value <= original
pmullw mm6,mm7
psubw mm0,mm2 ;mismatch
psubw mm3,mm6
-movq mm2,[byte ebx]
+movq mm2,[byte _EBX]
-movq mm6,[mmx_div + ecx * 8 - 8] ; divs for q<=16
-pmulhuw mm0,[edi + 768 + 8*esi+112] ;correction
-pmulhuw mm3,[edi + 768 + 8*esi+120]
-paddw mm2,[ebx+8] ;sum
+%ifdef ARCH_IS_X86_64
+lea r9, [mmx_div]
+movq mm6, [r9 + TMP0 * 8 - 8]
+%else
+movq mm6,[mmx_div + TMP0 * 8 - 8] ; divs for q<=16
+%endif
+pmulhuw mm0,[_EDI + 768 + 8*_ESI+112] ;correction
+pmulhuw mm3,[_EDI + 768 + 8*_ESI+120]
+paddw mm2,[_EBX+8] ;sum
paddw mm5,mm0 ;final result
paddw mm7,mm3
pxor mm0,mm0
pxor mm3,mm3
pmulhuw mm5, mm6 ; mm0 = (mm0 / 2Q) >> 16
pmulhuw mm7, mm6 ; (level ) / quant (0<quant<32)
-add esi,byte 2
+add _ESI,byte 2
psrlw mm5, 1 ; (level ) / (2*quant)
paddw mm2,mm5 ;sum += x1
psrlw mm7, 1
-movq [ebx],mm7 ;store x2
+movq [_EBX],mm7 ;store x2
pxor mm5, mm1 ; mm0 *= sign(mm0)
pxor mm7, mm4 ;
psubw mm5, mm1 ; undisplace
psubw mm7, mm4 ;
-db 0Fh, 7Fh, 54h, 23h, 08 ;movq [ebx+8],mm2 ;store sum
+movq [_EBX+8], mm2 ;store sum
-movq [edx + 8*esi+112-16], mm5
+movq [TMP1 + 8*_ESI+112-16], mm5
-movq [edx + 8*esi +120-16], mm7
+movq [TMP1 + 8*_ESI +120-16], mm7
jng near .lloop
jmp near .done
ENDFUNC
@@ line 382 @@
;

%macro DEQUANT4INTRAMMX 1
-movq mm1, [byte ecx+ 16 * %1] ; mm0 = c = coeff[i]
+movq mm1, [byte TMP0+ 16 * %1] ; mm0 = c = coeff[i]
-movq mm4, [ecx+ 16 * %1 +8] ; mm3 = c' = coeff[i+1]
+movq mm4, [TMP0+ 16 * %1 +8] ; mm3 = c' = coeff[i+1]
psubw mm0, mm1
psubw mm3, mm4
pmaxsw mm0, mm1
@@ line 391 @@
psraw mm1, 15
psraw mm4, 15
%if %1
-movq mm2, [eax+8] ;preshifted quant
+movq mm2, [_EAX+8] ;preshifted quant
-movq mm7, [eax+8]
+movq mm7, [_EAX+8]
%endif
-pmullw mm2, [edi + 16 * %1 ] ; matrix[i]*quant
+pmullw mm2, [TMP1 + 16 * %1 ] ; matrix[i]*quant
-pmullw mm7, [edi + 16 * %1 +8] ; matrix[i+1]*quant
+pmullw mm7, [TMP1 + 16 * %1 +8] ; matrix[i+1]*quant
movq mm5, mm0
movq mm6, mm3
pmulhw mm0, mm2 ; high of coeff*(matrix*quant)
pmulhw mm3, mm7 ; high of coeff*(matrix*quant)
pmullw mm2, mm5 ; low of coeff*(matrix*quant)
pmullw mm7, mm6 ; low of coeff*(matrix*quant)
-pcmpgtw mm0, [eax]
+pcmpgtw mm0, [_EAX]
-pcmpgtw mm3, [eax]
+pcmpgtw mm3, [_EAX]
paddusw mm2, mm0
paddusw mm7, mm3
psrlw mm2, 5
@@ line 412 @@
pxor mm7, mm4 ; start negating back
psubusw mm1, mm0
psubusw mm4, mm3
-movq mm0, [eax] ;zero
+movq mm0, [_EAX] ;zero
-movq mm3, [eax] ;zero
+movq mm3, [_EAX] ;zero
psubw mm2, mm1 ; finish negating back
psubw mm7, mm4 ; finish negating back
-movq [byte edx + 16 * %1], mm2 ; data[i]
+movq [byte _EDI + 16 * %1], mm2 ; data[i]
-movq [edx + 16 * %1 +8], mm7 ; data[i+1]
+movq [_EDI + 16 * %1 +8], mm7 ; data[i+1]
%endmacro

-ALIGN 16
+ALIGN SECTION_ALIGN
dequant_mpeg_intra_3dne:
-mov eax, [esp+12] ; quant
+mov _EAX, prm3 ; quant
-mov ecx, [esp+8] ; coeff
-movq mm7, [mmx_mul_quant + eax*8 - 8]
+%ifdef ARCH_IS_X86_64
+lea TMP0, [mmx_mul_quant]
+movq mm7, [TMP0 + _EAX*8 - 8]
+%else
+movq mm7, [mmx_mul_quant + _EAX*8 - 8]
+%endif
+mov TMP0, prm2 ; coeff
psllw mm7, 2 ; << 2. See comment.
-mov edx, [esp+4] ; data
+mov TMP1, prm5 ; mpeg_quant_matrices
-push ebx
+push _EBX
-movsx ebx, word [ecx]
+movsx _EBX, word [TMP0]
pxor mm0, mm0
pxor mm3, mm3
-push esi
+push _ESI
-lea eax, [esp-28]
+lea _EAX, [_ESP-28]
-sub esp, byte 32
+sub _ESP, byte 32
-and eax, byte -8 ;points to qword ALIGNed space on stack
+and _EAX, byte -8 ;points to qword ALIGNed space on stack
-movq [eax], mm0
+movq [_EAX], mm0
-movq [eax+8], mm7
+movq [_EAX+8], mm7
-imul ebx, [esp+16+8+32] ; dcscalar
+%ifdef ARCH_IS_X86_64
+imul _EBX, prm4 ; dcscalar
+%else
+imul _EBX, [_ESP+16+8+32] ; dcscalar
+%endif
movq mm2, mm7
-push edi
+push _EDI
-mov edi, [esp + 32 + 12 + 20] ; mpeg_quant_matrices
-ALIGN 4
+%ifdef ARCH_IS_X86_64
+mov _EDI, prm1 ; data
+%else
+mov _EDI, [_ESP+4+12+32]
+%endif

+ALIGN SECTION_ALIGN

DEQUANT4INTRAMMX 0

-mov esi, -2048
+mov _ESI, -2048
nop
-cmp ebx, esi
+cmp _EBX, _ESI

DEQUANT4INTRAMMX 1

-cmovl ebx, esi
+cmovl _EBX, _ESI
-neg esi
+neg _ESI
-sub esi, byte 1 ;2047
+sub _ESI, byte 1 ;2047

DEQUANT4INTRAMMX 2

-cmp ebx, esi
+cmp _EBX, _ESI
-cmovg ebx, esi
+cmovg _EBX, _ESI
-lea ebp, [byte ebp]
+lea _EBP, [byte _EBP]

DEQUANT4INTRAMMX 3

-mov esi, [esp+36]
+mov _ESI, [_ESP+32+PTR_SIZE]
-mov [byte edx], bx
+mov [byte _EDI], bx
-mov ebx, [esp+36+4]
+mov _EBX, [_ESP+32+2*PTR_SIZE]

DEQUANT4INTRAMMX 4
DEQUANT4INTRAMMX 5
DEQUANT4INTRAMMX 6
DEQUANT4INTRAMMX 7

-pop edi
+pop _EDI

-add esp, byte 32+8
+add _ESP, byte 32+2*PTR_SIZE

-xor eax, eax
+xor _EAX, _EAX
ret
ENDFUNC

@@ line 509 @@
; sgn(x) is the result of 'pcmpgtw 0,x': 0 if x>=0, -1 if x<0.
; It's mixed with the extraction of the absolute value.
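; A rough worked illustration of that trick: with m = sgn(x) from pcmpgtw
; (0 for x >= 0, -1 for x < 0), the pair "pxor x, m" / "psubw x, m" computes
; (x ^ m) - m, which equals |x| in either case. E.g. x = -5, m = -1:
; (-5 ^ -1) = 4 and 4 - (-1) = 5. Applying the same two instructions again
; after quantization puts the original sign back on the result.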

-ALIGN 16
+ALIGN SECTION_ALIGN
dequant_mpeg_inter_3dne:
-mov edx, [esp+ 4] ; data
-mov ecx, [esp+ 8] ; coeff
-mov eax, [esp+12] ; quant
-movq mm7, [mmx_mul_quant + eax*8 - 8]
-mov eax, -14
+mov _EAX, prm3 ; quant
+%ifdef ARCH_IS_X86_64
+lea TMP0, [mmx_mul_quant]
+movq mm7, [TMP0 + _EAX*8 - 8]
+%else
+movq mm7, [mmx_mul_quant + _EAX*8 - 8]
+%endif
+mov TMP1, prm1 ; data
+mov TMP0, prm2 ; coeff
+mov _EAX, -14
paddw mm7, mm7 ; << 1
pxor mm6, mm6 ; mismatch sum
-push esi
+push _ESI
-push edi
+push _EDI
-mov esi, mmzero
+mov _ESI, mmzero
pxor mm1, mm1
pxor mm3, mm3
-mov edi, [esp + 8 + 16] ; mpeg_quant_matrices
+%ifdef ARCH_IS_X86_64
+mov _EDI, prm4
+%else
+mov _EDI, [_ESP + 8 + 16] ; mpeg_quant_matrices
+%endif
nop
nop4

-ALIGN 16
+ALIGN SECTION_ALIGN
.loop:
-movq mm0, [ecx+8*eax + 7*16 ] ; mm0 = coeff[i]
+movq mm0, [TMP0+8*_EAX + 7*16 ] ; mm0 = coeff[i]
pcmpgtw mm1, mm0 ; mm1 = sgn(c) (preserved)
-movq mm2, [ecx+8*eax + 7*16 +8] ; mm2 = coeff[i+1]
+movq mm2, [TMP0+8*_EAX + 7*16 +8] ; mm2 = coeff[i+1]
pcmpgtw mm3, mm2 ; mm3 = sgn(c') (preserved)
paddsw mm0, mm1 ; c += sgn(c)
paddsw mm2, mm3 ; c += sgn(c')
paddw mm0, mm0 ; c *= 2
paddw mm2, mm2 ; c'*= 2

-movq mm4, [esi]
+movq mm4, [_ESI]
-movq mm5, [esi]
+movq mm5, [_ESI]
psubw mm4, mm0 ; -c
psubw mm5, mm2 ; -c'

@@ line 563 @@

movq mm4, mm7 ; (matrix*quant)
nop
-pmullw mm4, [edi + 512 + 8*eax + 7*16]
+pmullw mm4, [_EDI + 512 + 8*_EAX + 7*16]
movq mm5, mm4
pmulhw mm5, mm0 ; high of c*(matrix*quant)
pmullw mm0, mm4 ; low of c*(matrix*quant)

movq mm4, mm7 ; (matrix*quant)
-pmullw mm4, [edi + 512 + 8*eax + 7*16 + 8]
+pmullw mm4, [_EDI + 512 + 8*_EAX + 7*16 + 8]
-add eax, byte 2
+add _EAX, byte 2

-pcmpgtw mm5, [esi]
+pcmpgtw mm5, [_ESI]
paddusw mm0, mm5
psrlw mm0, 5
pxor mm0, mm1 ; start restoring sign
@@ line 583 @@
pmullw mm2, mm4 ; low of c*(matrix*quant)
psubw mm0, mm1 ; finish restoring sign

-pcmpgtw mm5, [esi]
+pcmpgtw mm5, [_ESI]
paddusw mm2, mm5
psrlw mm2, 5
pxor mm2, mm3 ; start restoring sign
psubusw mm3, mm5
psubw mm2, mm3 ; finish restoring sign
-movq mm1, [esi]
+movq mm1, [_ESI]
-movq mm3, [byte esi]
+movq mm3, [byte _ESI]
pxor mm6, mm0 ; mismatch control
-movq [edx + 8*eax + 7*16 -2*8 ], mm0 ; data[i]
+movq [TMP1 + 8*_EAX + 7*16 -2*8 ], mm0 ; data[i]
pxor mm6, mm2 ; mismatch control
-movq [edx + 8*eax + 7*16 -2*8 +8], mm2 ; data[i+1]
+movq [TMP1 + 8*_EAX + 7*16 -2*8 +8], mm2 ; data[i+1]

jng .loop
nop
@@ line 608 @@
pxor mm1, mm2
pxor mm6, mm1
movd eax, mm6
-pop edi
+pop _EDI
-and eax, byte 1
+and _EAX, byte 1
-xor eax, byte 1
+xor _EAX, byte 1
-mov esi, [esp]
+mov _ESI, [_ESP]
-add esp, byte 4
+add _ESP, byte PTR_SIZE
-xor word [edx + 2*63], ax
+xor word [TMP1 + 2*63], ax

-xor eax, eax
+xor _EAX, _EAX
ret
ENDFUNC
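; Rough summary of the mismatch handling above (assuming MPEG-style
; dequantization): mm6 accumulates an XOR of every output word, so its low
; bit ends up holding the parity of the coefficient sum; when that sum is
; even, the final "xor word [TMP1 + 2*63], ax" flips the least significant
; bit of the last coefficient, as the MPEG mismatch-control rule requires.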