// ------------------------------------------------------------------------------
// * Optimized Assembler Versions of sad8 and sad16
// *
// ------------------------------------------------------------------------------
// * Hannes Jütting and Christopher Özbek
// * {s_juetti,s_oezbek}@ira.uka.de
// *
// * Programmed for the IA64 laboratory held at University Karlsruhe 2002
// * http://www.info.uni-karlsruhe.de/~rubino/ia64p/
// *
// ------------------------------------------------------------------------------
// *
// * These are the optimized assembler versions of sad8 and sad16, which calculate
// * the sum of absolute differences between two 8x8/16x16 block matrices.
// *
// * Our approach uses:
// *  - The Itanium command psad1, which solves the problem in hardware.
// *  - Modulo-Scheduled Loops as the best way to loop unrolling on the IA64
// *    EPIC architecture
// *  - Alignment resolving to avoid memory faults
// *
// ------------------------------------------------------------------------------
26 |
.text |
.text |
27 |
|
|
28 |
|
// ------------------------------------------------------------------------------ |
29 |
|
// * SAD16_IA64 |
30 |
|
// * |
31 |
|
// * In: |
32 |
|
// * r32 = cur (aligned) |
33 |
|
// * r33 = ref (not aligned) |
34 |
|
// * r34 = stride |
35 |
|
// * r35 = bestsad |
36 |
|
// * Out: |
37 |
|
// * r8 = sum of absolute differences |
38 |
|
// * |
39 |
|
// ------------------------------------------------------------------------------ |
40 |
|
|
41 |
.align 16 |
.align 16 |
42 |
.global sad16_ia64# |
.global sad16_ia64# |
43 |
.proc sad16_ia64# |
.proc sad16_ia64# |
44 |
sad16_ia64: |
sad16_ia64: |
45 |
|
|
|
_LL=3 |
|
|
_SL=1 |
|
|
_OL=1 |
|
|
_PL=1 |
|
|
_AL=1 |
|
46 |
|
|
47 |
alloc r9=ar.pfs,4,44,0,48 |
// Define Latencies |
48 |
|
LL16=3 // load latency |
49 |
|
SL16=1 // shift latency |
50 |
|
OL16=1 // or latency |
51 |
|
PL16=1 // psad latency |
52 |
|
AL16=1 // add latency |
53 |
|
|
54 |
mov r8 = r0 |
// Allocate Registern in RSE |
55 |
|
alloc r9=ar.pfs,4,36,0,40 |
56 |
|
|
57 |
mov r20 = ar.lc |
// lfetch [r32] // might help |
|
mov r21 = pr |
|
58 |
|
|
59 |
dep.z r22 = r32, 3, 3 // erste 3 Bit mit 8 multiplizieren |
mov r8 = r0 // clear the return reg |
|
dep.z r23 = r33, 3, 3 // in r22 und r23 -> Schiebeflags |
|
60 |
|
|
61 |
and r14 = -8, r32 // Parameter in untere Register kopieren |
// Save LC and predicates |
62 |
and r15 = -8, r33 // Ref Cur mit 11111...1000 and-en |
mov r20 = ar.lc |
63 |
mov r16 = r34 |
mov r21 = pr |
|
mov r17 = r35 |
|
|
;; |
|
|
add r18 = 8, r14 // Adressenvorausberechnen |
|
|
add r19 = 8, r15 |
|
64 |
|
|
65 |
sub r24 = 64, r22 // Schiftanzahl ausrechnen |
dep.z r23 = r33, 3, 3 // get the # of bits ref is misaligned |
66 |
sub r25 = 64, r23 |
and r15 = -8, r33 // align the ref pointer by deleting the last 3 bit |
67 |
|
|
68 |
add r26 = 16, r14 // Adressenvorausberechnen |
mov r14 = r32 // save the cur pointer |
69 |
add r27 = 16, r15 |
mov r16 = r34 // save stride |
70 |
|
mov r17 = r35 // save bestsad |
71 |
|
|
72 |
// Loop-counter initialisieren |
;; |
73 |
mov ar.lc = 15 // Loop 16 mal durchlaufen |
add r18 = 8, r14 // precalc second cur pointer |
74 |
mov ar.ec = _LL + _SL + _OL + _PL + _AL + _AL // Die Loop am Schluss noch neun mal durchlaufen |
add r19 = 8, r15 // precalc second ref pointer |
75 |
|
add r27 = 16, r15 // precalc third ref pointer |
76 |
|
sub r25 = 64, r23 // # of right shifts |
77 |
|
|
78 |
// Rotating Predicate Register zuruecksetzen und P16 auf 1 |
// Initialize Loop-counters |
79 |
mov pr.rot = 1 << 16 |
mov ar.lc = 15 // loop 16 times |
80 |
|
mov ar.ec = LL16 + SL16 + OL16 + PL16 + AL16 + AL16 |
81 |
|
mov pr.rot = 1 << 16 // reseting rotating predicate regs and set p16 to 1 |
82 |
;; |
;; |
83 |
|
|
84 |
// Array-Konstrukte initialisieren |
// Intialize Arrays for Register Rotation |
85 |
.rotr _ald1[_LL+1], _ald2[_LL+1], _ald3[_LL+1], _ald4[_LL+1], _ald5[_LL+1], _ald6[_LL+1], _shru1[_SL+1], _shl1[_SL+1], _shru2[_SL], _shl2[_SL], _shru3[_SL], _shl3[_SL], _shru4[_SL], _shl4[_SL+1], _or1[_OL], _or2[_OL], _or3[_OL], _or4[_OL+1], _psadr1[_PL+1], _psadr2[_PL+1], _addr1[_AL+1] |
.rotr r_cur_ld1[LL16+SL16+OL16+1], r_cur_ld2[LL16+SL16+OL16+1], r_ref_16_ld1[LL16+1], r_ref_16_ld2[LL16+1], r_ref_16_ld3[LL16+1], r_ref_16_shru1[SL16], r_ref_16_shl1[SL16], r_ref_16_shru2[SL16], r_ref_16_shl2[SL16+1], r_ref_16_or1[OL16], r_ref_16_or2[OL16+1], r_psad1[PL16+1], r_psad2[PL16+1], r_add_16[AL16+1] |
86 |
.rotp _aldp[_LL], _shp[_SL], _orp[_OL], _psadrp[_PL], _addrp1[_AL], _addrp2[_AL] |
.rotp p_ld_16[LL16], p_sh_16[SL16], p_or_16[OL16], p_psad_16[PL16], p_add1_16[AL16], p_add2_16[AL16] |
87 |
|
|
88 |
.L_loop_16: |
.L_loop16: |
89 |
{.mmi |
{.mmi |
90 |
(_aldp[0]) ld8 _ald1[0] = [r14], r16 // Cur Erste 8 Byte |
(p_ld_16[0]) ld8 r_cur_ld1[0] = [r14], r16 // Cur load first 8 Byte |
91 |
(_aldp[0]) ld8 _ald2[0] = [r18], r16 // Cur Zweite 8 Byte |
(p_ld_16[0]) ld8 r_cur_ld2[0] = [r18], r16 // Cur load next 8 Byte |
92 |
(_psadrp[0]) psad1 _psadr1[0] = _or2[0], _or4[0] // Psadden |
(p_psad_16[0]) psad1 r_psad1[0] = r_cur_ld1[LL16+SL16+OL16], r_ref_16_or2[0] // psad of cur and ref |
93 |
} |
} |
94 |
{.mmi |
{.mmi |
95 |
(_aldp[0]) ld8 _ald3[0] = [r26], r16 // Cur Dritte 8 Byte |
(p_ld_16[0]) ld8 r_ref_16_ld1[0] = [r15], r16 // Ref load first 8 Byte (unaligned) |
96 |
(_aldp[0]) ld8 _ald4[0] = [r15], r16 // Ref Erste 8 Byte |
(p_ld_16[0]) ld8 r_ref_16_ld2[0] = [r19], r16 // Ref load next 8 Byte (unaligned) |
97 |
(_psadrp[0]) psad1 _psadr2[0] = _or3[0], _or4[_OL] // _or2 +1 |
(p_psad_16[0]) psad1 r_psad2[0] = r_cur_ld2[LL16+SL16+OL16], r_ref_16_or2[OL16] // psad of cur_2 and ref_2 |
|
} |
|
|
{.mmi |
|
|
(_aldp[0]) ld8 _ald5[0] = [r19], r16 // Ref Zweite 8 Byte |
|
|
(_aldp[0]) ld8 _ald6[0] = [r27], r16 // Ref Dritte 8 Byte |
|
|
(_shp[0]) shr.u _shru1[0] = _ald1[_LL], r22 |
|
98 |
} |
} |
99 |
{.mii |
{.mii |
100 |
(_orp[0]) or _or1[0] = _shl2[0], _shru3[0] // _shru2 + 1 und _shl2 + 1 |
(p_ld_16[0]) ld8 r_ref_16_ld3[0] = [r27], r16 // Ref load third 8 Byte (unaligned) |
101 |
(_shp[0]) shl _shl1[0] = _ald2[_LL], r24 |
(p_or_16[0]) or r_ref_16_or1[0] = r_ref_16_shl1[0], r_ref_16_shru2[0] // Ref or r_ref_16_shl1 + 1 and r_ref_16_shl1 + 1 |
102 |
(_shp[0]) shr.u _shru2[0] = _ald2[_LL], r22 |
(p_sh_16[0]) shr.u r_ref_16_shru1[0] = r_ref_16_ld1[LL16], r23 // Ref shift |
103 |
} |
} |
104 |
{.mii |
{.mii |
105 |
(_orp[0]) or _or2[0] = _shl3[0], _shru4[0] // _shru3 + 1 und _shl3 + 1 |
(p_or_16[0]) or r_ref_16_or2[0] = r_ref_16_shl2[0], r_ref_16_shl2[SL16] // Ref or r_ref_shru2 + 1 and r_ref_shl2 + 1 |
106 |
(_shp[0]) shl _shl2[0] = _ald3[_LL], r24 |
(p_sh_16[0]) shl r_ref_16_shl1[0] = r_ref_16_ld2[LL16], r25 // Ref shift |
107 |
(_shp[0]) shr.u _shru3[0] = _ald4[_LL], r23 |
(p_sh_16[0]) shr.u r_ref_16_shru2[0] = r_ref_16_ld2[LL16], r23 // Ref shift |
108 |
} |
} |
109 |
{.mii |
{.mib |
110 |
(_orp[0]) or _or3[0] = _shl4[0], _shl4[_SL] //_shru4 + 1 und _shl4 + 1 |
(p_add2_16[0]) cmp.ge.unc p6, p7 = r8, r17 |
111 |
(_shp[0]) shl _shl3[0] = _ald5[_LL], r25 |
(p_sh_16[0]) shl r_ref_16_shl2[0]= r_ref_16_ld3[LL16], r25 // Ref shift |
112 |
(_shp[0]) shr.u _shru4[0] = _ald5[_LL], r23 |
(p6) br.spnt.few .L_loop_exit16 |
|
} |
|
|
{.mmi |
|
|
(_orp[0]) or _or4[0] = _shru1[_SL], _shl1[_SL] |
|
|
(_shp[0]) shl _shl4[0]= _ald6[_LL], r25 |
|
113 |
} |
} |
114 |
{.mmb |
{.mmb |
115 |
(_addrp1[0]) add _addr1[0] = _psadr1[_PL], _psadr2[_PL] // Aufsummieren |
(p_add1_16[0]) add r_add_16[0] = r_psad1[PL16], r_psad2[PL16] // add the psad results |
116 |
(_addrp2[0]) add r8 = r8, _addr1[_AL] |
(p_add2_16[0]) add r8 = r8, r_add_16[AL16] // add the results to the sum |
117 |
br.ctop.sptk.few .L_loop_16 |
br.ctop.sptk.few .L_loop16 |
118 |
;; |
;; |
119 |
} |
} |
120 |
// Register zurueckschreiben |
.L_loop_exit16: |
121 |
|
|
122 |
|
// Restore LC and predicates |
123 |
mov ar.lc = r20 |
mov ar.lc = r20 |
124 |
mov pr = r21,-1 |
mov pr = r21,-1 |
125 |
|
|
126 |
|
// Return |
127 |
br.ret.sptk.many rp |
br.ret.sptk.many rp |
128 |
.endp sad16_ia64# |
.endp sad16_ia64# |
129 |
|
|
130 |
|
// ------------------------------------------------------------------------------ |
131 |
|
// * SAD8_IA64 |
132 |
|
// * |
133 |
|
// * In: |
134 |
|
// * r32 = cur (aligned) |
135 |
|
// * r33 = ref (not aligned) |
136 |
|
// * r34 = stride |
137 |
|
// * Out: |
138 |
|
// * r8 = sum of absolute differences |
139 |
|
// * |
140 |
|
// ------------------------------------------------------------------------------ |
141 |
|
|
142 |
.align 16 |
.align 16 |
143 |
.global sad8_ia64# |
.global sad8_ia64# |
145 |
|
|
146 |
sad8_ia64: |
sad8_ia64: |
147 |
|
|
|
LL=3 |
|
|
SL=1 |
|
|
OL=1 |
|
|
PL=1 |
|
|
AL=1 |
|
148 |
|
|
149 |
alloc r9=ar.pfs,3,29,0,32 |
// Define Latencies |
150 |
mov r20 = ar.lc |
LL8=3 // load latency |
151 |
mov r21 = pr |
SL8=1 // shift latency |
152 |
|
OL8=1 // or latency |
153 |
|
PL8=1 // psad latency |
154 |
|
AL8=1 // add latency |
155 |
|
|
156 |
dep.z r22 = r32, 3, 3 // erste 3 Bit mit 8 multiplizieren |
// Allocate Registers in RSE |
157 |
dep.z r23 = r33, 3, 3 // in r22 und r23 -> Schiebeflags |
alloc r9 = ar.pfs,3,21,0,24 |
158 |
|
|
159 |
mov r8 = r0 // . . . . |
// lfetch [r32] // Maybe this helps? |
160 |
and r14 = -8, r32 // 0xFFFFFFFFFFFFFFF8, r32 |
|
161 |
and r15 = -8, r33 // 0xFFFFFFFFFFFFFFF8, r33 |
mov r8 = r0 // Initialize result |
|
mov r16 = r34 |
|
|
// mov r17 = r35 |
|
|
;; |
|
162 |
|
|
163 |
add r18 = 8, r14 |
mov r14 = r32 // Save Cur |
164 |
add r19 = 8, r15 |
and r15 = -8, r33 // Align the Ref pointer by deleting the last 3 bit |
165 |
|
mov r16 = r34 // Save Stride |
166 |
|
|
167 |
sub r24 = 64, r22 |
// Save LC and predicates |
168 |
sub r25 = 64, r23 |
mov r20 = ar.lc |
169 |
|
mov r21 = pr |
170 |
|
|
171 |
// Loop-counter initialisieren |
dep.z r23 = r33, 3, 3 // get the # of bits ref is misaligned |
|
mov ar.lc = 7 // Loop 7 mal durchlaufen |
|
|
mov ar.ec = LL + SL + OL + PL + AL // Die Loop am Schluss noch zehn mal durchlaufen |
|
172 |
|
|
|
// Rotating Predicate Register zuruecksetzen und P16 auf 1 |
|
|
mov pr.rot = 1 << 16 |
|
173 |
;; |
;; |
174 |
.rotr ald1[LL+1], ald2[LL+1], ald3[LL+1], ald4[LL+1], shru1[SL+1], shl1[SL+1], shru2[SL+1], shl2[SL+1], or1[OL+1], or2[OL+1], psadr[PL+1], addr[AL+1] |
|
175 |
.rotp aldp[LL], shp[SL], orp[OL], psadrp[PL], addrp[AL] |
add r19 = 8, r15 // Precalculate second load-offset |
176 |
.L_loop_8: |
sub r25 = 64, r23 // Precalculate # of shifts |
177 |
{.mmi |
|
178 |
(aldp[0]) ld8 ald1[0] = [r14], r16 // Cur laden |
// Initialize Loop-Counters |
179 |
(aldp[0]) ld8 ald2[0] = [r18], r16 |
mov ar.lc = 7 // Loop 7 times |
180 |
(shp[0]) shr.u shru1[0] = ald1[LL], r22 // mergen |
mov ar.ec = LL8 + SL8 + OL8 + PL8 + AL8 // Epiloque |
181 |
} |
mov pr.rot = 1 << 16 // Reset Predicate Registers and initialize with P16 |
182 |
{.mii |
|
183 |
(orp[0]) or or1[0] = shru1[SL], shl1[SL] |
// Initalize Arrays for Register Rotation |
184 |
(shp[0]) shl shl1[0] = ald2[LL], r24 |
.rotr r_cur_ld[LL8+SL8+OL8+1], r_ref_ld1[LL8+1], r_ref_ld2[LL8+1], r_shru[SL8+1], r_shl[SL8+1], r_or[OL8+1], r_psad[PL8+1] |
185 |
(shp[0]) shr.u shru2[0] = ald3[LL], r23 // mergen |
.rotp p_ld[LL8], p_sh[SL8], p_or[OL8], p_psad[PL8], p_add[AL8] |
186 |
} |
|
187 |
{.mmi |
;; |
188 |
(aldp[0]) ld8 ald3[0] = [r15], r16 // Ref laden |
.L_loop8: |
189 |
(aldp[0]) ld8 ald4[0] = [r19], r16 |
// {.mmi |
190 |
(shp[0]) shl shl2[0] = ald4[LL], r25 |
(p_ld[0]) ld8 r_ref_ld1[0] = [r15], r16 // Load 1st 8Byte from Ref |
191 |
} |
(p_ld[0]) ld8 r_cur_ld[0] = [r14], r16 // Load Cur |
192 |
{.mmi |
(p_psad[0]) psad1 r_psad[0] = r_cur_ld[LL8+SL8+OL8], r_or[OL8] // Do the Calculation |
193 |
(orp[0]) or or2[0] = shru2[SL], shl2[SL] |
// } |
194 |
(addrp[0]) add r8 = r8, psadr[PL] |
// {.mii |
195 |
(psadrp[0]) psad1 psadr[0] = or1[OL], or2[OL] |
(p_ld[0]) ld8 r_ref_ld2[0] = [r19], r16 // Load 2nd 8Byte from Ref |
196 |
} |
(p_sh[0]) shr.u r_shru[0] = r_ref_ld1[LL8], r23 // Shift unaligned Ref parts |
197 |
{.mbb |
(p_sh[0]) shl r_shl[0] = r_ref_ld2[LL8], r25 // Shift unaligned Ref parts |
198 |
br.ctop.sptk.few .L_loop_8 |
// } |
199 |
|
// {.mib |
200 |
|
(p_or[0]) or r_or[0] = r_shru[SL8], r_shl[SL8] // Combine unaligned Ref parts |
201 |
|
(p_add[0]) add r8 = r8, r_psad[PL8] // Sum psad result |
202 |
|
br.ctop.sptk.few .L_loop8 |
203 |
;; |
;; |
204 |
} |
// } |
205 |
|
|
206 |
|
// Restore Loop counters |
207 |
mov ar.lc = r20 |
mov ar.lc = r20 |
208 |
mov pr = r21,-1 |
mov pr = r21,-1 |
209 |
|
|
210 |
|
// Return |
211 |
br.ret.sptk.many b0 |
br.ret.sptk.many b0 |
212 |
.endp sad8_ia64# |
.endp sad8_ia64# |
213 |
|
|